/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_mem
 * QCA driver framework (QDF) memory management APIs
 */

#if !defined(__QDF_MEMORY_H)
#define __QDF_MEMORY_H

/* Include Files */
#include <qdf_types.h>
#include <i_qdf_mem.h>
#include <i_qdf_trace.h>
#include <qdf_atomic.h>

#define QDF_CACHE_LINE_SZ __qdf_cache_line_sz

/**
 * qdf_align() - align to the given size.
 * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
 *
 * Return: aligned value.
 */
#define qdf_align(a, align_size)   __qdf_align(a, align_size)
#define qdf_page_size __page_size
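
/*
 * Example (illustrative sketch only, not part of the original header):
 * rounding a buffer length up to the cache line size before allocating it.
 * The length value is hypothetical.
 *
 *	uint32_t len = 100;
 *
 *	len = qdf_align(len, QDF_CACHE_LINE_SZ);
 *	// len is now a multiple of the cache line size
 *	// (e.g. 128 for a 64-byte cache line)
 */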

/**
 * struct qdf_mem_dma_page_t - Allocated DMA-able page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 */
struct qdf_mem_dma_page_t {
	char *page_v_addr_start;
	char *page_v_addr_end;
	qdf_dma_addr_t page_p_addr;
};

/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of elements in a single page
 * @num_pages: Number of pages needed for the allocation
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: page size
 * @is_mem_prealloc: flag indicating whether the pages were pre-allocated
 * @contiguous_dma_pages: flag indicating whether the DMA pages are contiguous
 */
struct qdf_mem_multi_page_t {
	uint16_t num_element_per_page;
	uint16_t num_pages;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages;
	qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
	uint8_t is_mem_prealloc;
#endif
#ifdef ALLOC_CONTIGUOUS_MULTI_PAGE
	bool contiguous_dma_pages;
#endif
};


/* Preprocessor definitions and constants */

typedef __qdf_mempool_t qdf_mempool_t;

/**
 * qdf_mem_init() - Initialize QDF memory module
 *
 * Return: None
 */
void qdf_mem_init(void);

/**
 * qdf_mem_exit() - Exit QDF memory module
 *
 * Return: None
 */
void qdf_mem_exit(void);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
#define qdf_untracked_mem_malloc(size) \
	__qdf_untracked_mem_malloc(size, __func__, __LINE__)

#define qdf_untracked_mem_free(ptr) \
	__qdf_untracked_mem_free(ptr)
#endif

#define QDF_MEM_FUNC_NAME_SIZE 48

#ifdef MEMORY_DEBUG
/**
 * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
 *
 * Return: value of mem_debug_disabled qdf module argument
 */
bool qdf_mem_debug_config_get(void);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
 * @str_value: value of the module param
 *
 * This function will set qdf module param mem_debug_disabled
 *
 * Return: QDF_STATUS_SUCCESS on Success
 */
QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value);
#endif

/**
 * qdf_mem_malloc_atomic_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check for memory leaks and
 * corruptions
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
				  uint32_t line, void *caller);

/**
 * qdf_mem_malloc_atomic_debug_fl() - allocate QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *qdf_mem_malloc_atomic_debug_fl(qdf_size_t size, const char *func,
				     uint32_t line);

/**
 * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 * @flag: GFP flag
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check for memory leaks and
 * corruptions
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
			   void *caller, uint32_t flag);

#define qdf_mem_malloc(size) \
	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)

#define qdf_mem_malloc_fl(size, func, line) \
	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)

#define qdf_mem_malloc_atomic(size) \
	qdf_mem_malloc_atomic_debug(size, __func__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_free() - free allocated memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'. It also checks for
 * memory corruption, underrun, overrun, double free, domain mismatch, etc.
 *
 * Return: none
 */
#define qdf_mem_free(ptr) \
	qdf_mem_free_debug(ptr, __func__, __LINE__)
void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);

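/*
 * Example (illustrative sketch only, not part of the original header): a
 * typical qdf_mem_malloc()/qdf_mem_free() pairing. The structure name
 * foo_ctx is hypothetical.
 *
 *	struct foo_ctx *ctx = qdf_mem_malloc(sizeof(*ctx));
 *
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// ... use ctx ...
 *
 *	qdf_mem_free(ctx);
 *
 * In atomic context (e.g. under a spinlock), use qdf_mem_malloc_atomic()
 * instead, paired with the same qdf_mem_free().
 */
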
/**
 * qdf_mem_multi_pages_alloc_debug() - Debug version of
 * qdf_mem_multi_pages_alloc
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Size of each element
 * @element_num: Total number of elements to be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 * @func: Caller of this allocator
 * @line: Line number of the caller
 * @caller: Return address of the caller
 *
 * This function allocates a large amount of memory spread over multiple
 * pages. Large contiguous allocations fail frequently, so instead of
 * requesting the whole buffer in one shot, the memory is allocated as
 * multiple non-contiguous pages that are combined at the point of use.
 *
 * Return: None
 */
void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
				     struct qdf_mem_multi_page_t *pages,
				     size_t element_size, uint32_t element_num,
				     qdf_dma_context_t memctxt, bool cacheable,
				     const char *func, uint32_t line,
				     void *caller);

/**
 * qdf_mem_multi_pages_alloc() - allocate a large amount of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @element_size: Size of each element
 * @element_num: Total number of elements to be allocated
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function allocates a large amount of memory spread over multiple
 * pages. Large contiguous allocations fail frequently, so instead of
 * requesting the whole buffer in one shot, the memory is allocated as
 * multiple non-contiguous pages that are combined at the point of use.
 *
 * Return: None
 */
#define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
				  memctxt, cacheable) \
	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
					element_num, memctxt, cacheable, \
					__func__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 * @func: Caller of this allocator
 * @line: Line number of the caller
 *
 * This function frees the memory allocated across multiple pages.
 *
 * Return: None
 */
void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
				    struct qdf_mem_multi_page_t *pages,
				    qdf_dma_context_t memctxt, bool cacheable,
				    const char *func, uint32_t line);

/**
 * qdf_mem_multi_pages_free() - free a large amount of kernel memory
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @memctxt: Memory context
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function frees the memory allocated across multiple pages.
 *
 * Return: None
 */
#define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
				       __func__, __LINE__)

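/*
 * Example (illustrative sketch only, not part of the original header):
 * allocating a descriptor pool as cacheable multi-page memory and freeing it
 * again. The element type/count and the soc->osdev handle are hypothetical.
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	qdf_mem_multi_pages_alloc(soc->osdev, &pages, sizeof(struct tx_desc),
 *				  1024, 0, true);
 *
 *	// on success the per-page virtual addresses are available through
 *	// pages.cacheable_pages (or pages.dma_pages for coherent memory)
 *
 *	qdf_mem_multi_pages_free(soc->osdev, &pages, 0, true);
 */
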
/**
 * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
 *
 * Call this to ensure there are no active memory allocations being tracked
 * against the current debug domain. For example, one should call this function
 * immediately before a call to qdf_debug_domain_set() as a memory leak
 * detection mechanism.
 *
 * e.g.
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
 *
 *	...
 *
 *	// memory is allocated and freed
 *
 *	...
 *
 *	// before transitioning back to inactive state,
 *	// make sure all active memory has been freed
 *	qdf_mem_check_for_leaks();
 *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
 *
 *	...
 *
 *	// also, before program exit, make sure init time memory is freed
 *	qdf_mem_check_for_leaks();
 *	exit();
 *
 * Return: None
 */
void qdf_mem_check_for_leaks(void);

/**
 * qdf_mem_alloc_consistent() - allocate consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 *
 * Return: pointer to allocated memory or NULL if the allocation fails
 */
#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
				       __func__, __LINE__, QDF_RET_IP)
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller);

/**
 * qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: OS device
 * @size: Size of the allocation
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
				  __func__, __LINE__)
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line);
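
/*
 * Example (illustrative sketch only, not part of the original header):
 * allocating a DMA-coherent ring and releasing it. The ring size and the
 * soc->osdev handle are hypothetical.
 *
 *	qdf_dma_addr_t ring_paddr;
 *	void *ring_vaddr;
 *
 *	ring_vaddr = qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
 *					      4096, &ring_paddr);
 *	if (!ring_vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// ... program ring_paddr into hardware, use ring_vaddr from the CPU ...
 *
 *	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev, 4096,
 *				ring_vaddr, ring_paddr, 0);
 */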

#else
static inline bool qdf_mem_debug_config_get(void)
{
	return false;
}

static inline
QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * qdf_mem_malloc() - allocate QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc(size) \
	__qdf_mem_malloc(size, __func__, __LINE__)

#define qdf_mem_malloc_fl(size, func, line) \
	__qdf_mem_malloc(size, func, line)

/**
 * qdf_mem_malloc_atomic() - allocate QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc_atomic(size) \
	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)

void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
			       const char *func,
			       uint32_t line);

#define qdf_mem_free(ptr) \
	__qdf_mem_free(ptr)

static inline void qdf_mem_check_for_leaks(void) { }

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)

void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint32_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable);

void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable);

#endif /* MEMORY_DEBUG */

/**
 * qdf_mem_malloc_flags() - get memory allocation flags
 *
 * Return the GFP flag to be used for memory allocation based on the
 * current execution context.
 *
 * Return: the GFP flag for memory allocation appropriate to the context
 */
int qdf_mem_malloc_flags(void);

/**
 * qdf_prealloc_disabled_config_get() - Get the user configuration of
 *                                      prealloc_disabled
 *
 * Return: value of prealloc_disabled qdf module argument
 */
bool qdf_prealloc_disabled_config_get(void);

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
 * @str_value: value of the module param
 *
 * This function will set qdf module param prealloc_disabled
 *
 * Return: QDF_STATUS_SUCCESS on Success
 */
QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value);
#endif

/**
 * qdf_mem_multi_pages_zero() - zero out each page of memory
 * @pages: Multi page information storage
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will zero out each page of memory
 *
 * Return: None
 */
void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
			      bool cacheable);

/**
 * qdf_aligned_malloc() - allocate aligned QDF memory.
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. It checks whether the allocated base address is aligned to @align.
 * If not, it frees the allocated memory, adds @align to the allocation size
 * and re-allocates the memory.
 *
 * Return:
 * Upon successful allocation, returns an aligned base address of the allocated
 * memory.  If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
			   paddr_aligned, align) \
	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
			   paddr_aligned, align, __func__, __LINE__)

void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
			    qdf_dma_addr_t *paddr_unaligned,
			    qdf_dma_addr_t *paddr_aligned,
			    uint32_t align,
			    const char *func, uint32_t line);
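
/*
 * Example (illustrative sketch only, not part of the original header):
 * requesting a 2 KB buffer whose base address is aligned to 8 bytes. Note
 * that @size is passed by address because the allocation size may be grown
 * to satisfy the requested alignment.
 *
 *	uint32_t size = 2048;
 *	void *vaddr_unaligned, *vaddr_aligned;
 *	qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *
 *	vaddr_aligned = qdf_aligned_malloc(&size, &vaddr_unaligned,
 *					   &paddr_unaligned, &paddr_aligned,
 *					   8);
 *	if (!vaddr_aligned)
 *		return QDF_STATUS_E_NOMEM;
 */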

/**
 * qdf_aligned_mem_alloc_consistent() - allocate consistent qdf memory
 * @osdev: OS device handle
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 *
 * Return: pointer to allocated memory or NULL if the allocation fails.
 */
#define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
					 paddr_unaligned, paddr_aligned, \
					 align) \
	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
					    paddr_unaligned, paddr_aligned, \
					    align, __func__, __LINE__)

void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
					  void **vaddr_unaligned,
					  qdf_dma_addr_t *paddr_unaligned,
					  qdf_dma_addr_t *paddr_aligned,
					  uint32_t align, const char *func,
					  uint32_t line);

/**
 * qdf_mem_virt_to_phys() - Convert virtual address to physical
 * @vaddr: virtual address
 *
 * Return: physical address
 */
#define qdf_mem_virt_to_phys(vaddr) __qdf_mem_virt_to_phys(vaddr)

/**
 * qdf_mem_set_io() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte set in memory
 *
 * Return: None
 */
void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);

/**
 * qdf_mem_copy_toio() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Return: none
 */
void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
					   uint32_t num_bytes);

/**
 * qdf_mem_set() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte set in memory
 *
 * WARNING: parameters @num_bytes and @value are swapped compared with the
 * standard C function "memset"; please ensure correct usage of this function!
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);

/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
	qdf_mem_set(ptr, num_bytes, 0);
}

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Copy host memory from one location to another, similar to memcpy in
 * standard C.  Note this function does not specifically handle overlapping
 * source and destination memory locations.  Calling this function with
 * overlapping source and destination memory locations will result in
 * unpredictable results.  Use qdf_mem_move() if the memory locations
 * for the source and destination are overlapping (or could be overlapping!)
 *
 * Return: none
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_move() - move memory
 * @dst_addr: pointer to destination memory location (to move to)
 * @src_addr: pointer to source memory location (to move from)
 * @num_bytes: number of bytes to move.
 *
 * Move host memory from one location to another, similar to memmove in
 * standard C.  Note this function *does* handle overlapping
 * source and destination memory locations.
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_cmp() - memory compare
 * @left: pointer to one location in memory to compare
 * @right: pointer to second location in memory to compare
 * @size: the number of bytes to compare
 *
 * Function to compare two pieces of memory, similar to memcmp function
 * in standard C.
 *
 * Return:
 *	0 -- equal
 *	< 0 -- *left is less than *right
 *	> 0 -- *left is bigger than *right
 */
int qdf_mem_cmp(const void *left, const void *right, size_t size);
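
/*
 * Example (illustrative sketch only, not part of the original header):
 * contrasting qdf_mem_set() with the standard memset() argument order, plus
 * a simple copy/compare. The buffers are hypothetical.
 *
 *	uint8_t pattern[16], copy[16];
 *
 *	// memset(pattern, 0xA5, 16) in standard C becomes:
 *	qdf_mem_set(pattern, 16, 0xA5);
 *
 *	qdf_mem_copy(copy, pattern, sizeof(copy));
 *	if (!qdf_mem_cmp(copy, pattern, sizeof(copy)))
 *		; // buffers match
 *
 *	qdf_mem_zero(pattern, sizeof(pattern));
 */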

/**
 * qdf_ether_addr_copy() - copy an Ethernet address
 * @dst_addr: A six-byte array Ethernet address destination
 * @src_addr: A six-byte array Ethernet address source
 *
 * Please note: dst & src must both be aligned to u16.
 *
 * Return: none
 */
void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);

/**
 * qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive physical address.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
						 qdf_dma_dir_t dir, int nbytes,
						 qdf_dma_addr_t *phy_addr)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
	return 0;
#endif
}

/**
 * qdf_mem_dma_cache_sync() - Synchronize the CPU cache for a DMA buffer
 * @osdev: pointer to OS device context
 * @buf: DMA address of the buffer to be synchronized
 * @dir: DMA direction
 * @nbytes: number of bytes to be synchronized
 *
 * Return: none
 */
static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
					  qdf_dma_addr_t buf,
					  qdf_dma_dir_t dir,
					  int nbytes)
{
	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
}

/**
 * qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
					       qdf_dma_addr_t phy_addr,
					       qdf_dma_dir_t dir,
					       int nbytes)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
}
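
/*
 * Example (illustrative sketch only, not part of the original header):
 * mapping a CPU buffer for device DMA and unmapping it once the transfer is
 * done. The buf/len values and the soc->osdev handle are hypothetical, and
 * a zero return value is assumed to mean success.
 *
 *	qdf_dma_addr_t dma_addr;
 *
 *	if (qdf_mem_map_nbytes_single(soc->osdev, buf, QDF_DMA_TO_DEVICE,
 *				      len, &dma_addr))
 *		return QDF_STATUS_E_FAULT;
 *
 *	// ... hand dma_addr to the device ...
 *
 *	qdf_mem_unmap_nbytes_single(soc->osdev, dma_addr, QDF_DMA_TO_DEVICE,
 *				    len);
 */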

/**
 * qdf_mempool_init() - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success; a negative value if the pool could not be created
 * (the created pool is returned through @pool_addr)
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
				   qdf_mempool_t *pool_addr, int elem_cnt,
				   size_t elem_size, uint32_t flags)
{
	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
				  flags);
}

/**
 * qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
	__qdf_mempool_destroy(osdev, pool);
}

/**
 * qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
{
	return (void *)__qdf_mempool_alloc(osdev, pool);
}

/**
 * qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
				    void *buf)
{
	__qdf_mempool_free(osdev, pool, buf);
}
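
/*
 * Example (illustrative sketch only, not part of the original header):
 * carving out a small pool of fixed-size descriptors. The element type/count
 * and the soc->osdev handle are hypothetical, and a zero return from
 * qdf_mempool_init() is assumed to mean success.
 *
 *	qdf_mempool_t pool;
 *	struct my_desc *desc;
 *
 *	if (qdf_mempool_init(soc->osdev, &pool, 64, sizeof(*desc), 0))
 *		return QDF_STATUS_E_NOMEM;
 *
 *	desc = qdf_mempool_alloc(soc->osdev, pool);
 *
 *	// ... use desc, then return it to the pool ...
 *	qdf_mempool_free(soc->osdev, pool, desc);
 *
 *	qdf_mempool_destroy(soc->osdev, pool);
 */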

/**
 * qdf_kmem_cache_create() - OS abstraction for cache creation
 * @c: Cache name
 * @z: Size of the object to be created
 *
 * Return: Cache address on successful creation, else NULL
 */
#ifdef QCA_KMEM_CACHE_SUPPORT
#define qdf_kmem_cache_create(c, z) __qdf_kmem_cache_create(c, z)
#else
#define qdf_kmem_cache_create(c, z) NULL
#endif

/**
 * qdf_kmem_cache_destroy() - OS abstraction for cache destruction
 * @cache: Cache pointer
 *
 * Return: void
 */
static inline void qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
{
	__qdf_kmem_cache_destroy(cache);
}

/**
 * qdf_kmem_cache_alloc() - Allocate an object from a cache
 * @cache: Cache address
 *
 * Return: Object from cache
 */
static inline void *qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
{
	return __qdf_kmem_cache_alloc(cache);
}

/**
 * qdf_kmem_cache_free() - Free a cache object
 * @cache: Cache address
 * @node: Object to be returned to cache
 *
 * Return: void
 */
static inline void qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
{
	__qdf_kmem_cache_free(cache, node);
}
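
/*
 * Example (illustrative sketch only, not part of the original header): a
 * dedicated cache for frequently allocated objects. The cache name and
 * object type are hypothetical.
 *
 *	qdf_kmem_cache_t cache;
 *	struct my_node *node;
 *
 *	cache = qdf_kmem_cache_create("my_node_cache", sizeof(*node));
 *	if (!cache)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	node = qdf_kmem_cache_alloc(cache);
 *
 *	// ... use node ...
 *	qdf_kmem_cache_free(cache, node);
 *
 *	qdf_kmem_cache_destroy(cache);
 */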

/**
 * qdf_mem_dma_sync_single_for_device() - assign memory to device
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the device
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the remote device.
 * The cache lines are flushed to ram or invalidated as needed.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					__dma_data_direction direction);

/**
 * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
 * @osdev: OS device handle
 * @bus_addr: dma address to give to the cpu
 * @size: Size of the memory block
 * @direction: direction data will be DMAed
 *
 * Assign memory to the CPU.
 *
 * Return: none
 */
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					__dma_data_direction direction);
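
/*
 * Example (illustrative sketch only, not part of the original header):
 * ownership hand-off for a streaming DMA buffer. The dma_addr/len values and
 * the soc->osdev handle are hypothetical.
 *
 *	__dma_data_direction dir = ...; // direction of the transfer
 *
 *	// CPU finished writing the buffer; hand it to the device
 *	qdf_mem_dma_sync_single_for_device(soc->osdev, dma_addr, len, dir);
 *
 *	// device is done; hand the buffer back to the CPU before reading it
 *	qdf_mem_dma_sync_single_for_cpu(soc->osdev, dma_addr, len, dir);
 */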

/**
 * qdf_mem_multi_page_link() - Make links for multi page elements
 * @osdev: OS device handle pointer
 * @pages: Multi page information storage
 * @elem_size: Single element size
 * @elem_count: Number of elements to be linked
 * @cacheable: Coherent memory or cacheable memory
 *
 * This function will make links for a multi page allocated structure
 *
 * Return: 0 on success
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
			    struct qdf_mem_multi_page_t *pages,
			    uint32_t elem_size, uint32_t elem_count,
			    uint8_t cacheable);

/**
 * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
 * @size: number of bytes to increment by
 *
 * Return: None
 */
void qdf_mem_kmalloc_inc(qdf_size_t size);

/**
 * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
 * @size: number of bytes to decrement by
 *
 * Return: None
 */
void qdf_mem_kmalloc_dec(qdf_size_t size);

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * qdf_mem_skb_inc() - increment total skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_inc(qdf_size_t size);

/**
 * qdf_mem_skb_dec() - decrement total skb allocation size
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_dec(qdf_size_t size);

/**
 * qdf_mem_skb_total_inc() - increment total skb allocation size
 * in host driver in both debug and perf builds
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_total_inc(qdf_size_t size);

/**
 * qdf_mem_skb_total_dec() - decrement total skb allocation size
 * in the host driver in debug and perf flavors
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_total_dec(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_inc(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
 * @size: size to be decreased
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_dec(qdf_size_t size);

/**
 * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_inc(qdf_size_t size);

/**
 * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
 * @size: size to be decreased
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_dec(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_cnt_inc(void);

/**
 * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_cnt_dec(void);

/**
 * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_cnt_inc(void);

/**
 * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_cnt_dec(void);
#else

static inline void qdf_mem_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

/**
 * qdf_mem_map_table_alloc() - Allocate shared memory info structure
 * @num: number of required storage
 *
 * Allocate mapping table for DMA memory allocation. This is needed for
 * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
 *
 * Return: shared memory info storage table pointer
 */
static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
{
	qdf_mem_info_t *mem_info_arr;

	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
	return mem_info_arr;
}

#ifdef ENHANCED_OS_ABSTRACTION
/**
 * qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
void qdf_update_mem_map_table(qdf_device_t osdev,
			      qdf_mem_info_t *mem_info,
			      qdf_dma_addr_t dma_addr,
			      uint32_t mem_size);

/**
 * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
 * @osdev: Parent device instance
 * @dma_addr: DMA/IOVA address
 *
 * Get the actual physical address from dma_addr based on SMMU enablement
 * status. If SMMU Stage 1 translation is enabled, the DMA APIs return an IO
 * virtual address (IOVA); otherwise they return a physical address. So get
 * the SMMU physical address mapping from the IOVA.
 *
 * Return: dmaable physical address
 */
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
					  qdf_dma_addr_t dma_addr);
#else
static inline
void qdf_update_mem_map_table(qdf_device_t osdev,
			      qdf_mem_info_t *mem_info,
			      qdf_dma_addr_t dma_addr,
			      uint32_t mem_size)
{
	if (!mem_info) {
		qdf_nofl_err("%s: NULL mem_info", __func__);
		return;
	}

	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}

static inline
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
					  qdf_dma_addr_t dma_addr)
{
	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}
#endif
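
/*
 * Example (illustrative sketch only, not part of the original header):
 * recording the DMA address and size of a shared buffer so that the real
 * physical address can be recovered later, e.g. for IPA-WLAN buffer sharing.
 * The dma_addr/size values and the soc->osdev handle are hypothetical.
 *
 *	qdf_mem_info_t *mem_info = qdf_mem_map_table_alloc(1);
 *	qdf_dma_addr_t paddr;
 *
 *	if (!mem_info)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_update_mem_map_table(soc->osdev, &mem_info[0], dma_addr, size);
 *
 *	// translate the stored DMA/IOVA address back to a physical address
 *	// (identity mapping when SMMU stage 1 translation is bypassed)
 *	paddr = qdf_mem_paddr_from_dmaaddr(soc->osdev, dma_addr);
 */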
1071*5113495bSYour Name 
1072*5113495bSYour Name /**
1073*5113495bSYour Name  * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
1074*5113495bSYour Name  * @osdev: parent device instance
1075*5113495bSYour Name  *
1076*5113495bSYour Name  * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
1077*5113495bSYour Name  */
1078*5113495bSYour Name static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
1079*5113495bSYour Name {
1080*5113495bSYour Name 	return __qdf_mem_smmu_s1_enabled(osdev);
1081*5113495bSYour Name }
1082*5113495bSYour Name 
1083*5113495bSYour Name /**
1084*5113495bSYour Name  * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
1085*5113495bSYour Name  * @dev: device instance
1086*5113495bSYour Name  * @sgt: scatter gather table pointer
1087*5113495bSYour Name  * @cpu_addr: HLOS virtual address
1088*5113495bSYour Name  * @dma_addr: dma address
1089*5113495bSYour Name  * @size: allocated memory size
1090*5113495bSYour Name  *
1091*5113495bSYour Name  * Return: 0 on success, else error code
1092*5113495bSYour Name  */
1093*5113495bSYour Name static inline int
1094*5113495bSYour Name qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
1095*5113495bSYour Name 			qdf_dma_addr_t dma_addr, size_t size)
1096*5113495bSYour Name {
1097*5113495bSYour Name 	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
1098*5113495bSYour Name }
1099*5113495bSYour Name 
1100*5113495bSYour Name /**
1101*5113495bSYour Name  * qdf_mem_free_sgtable() - Free a previously allocated sg table
1102*5113495bSYour Name  * @sgt: the mapped sg table header
1103*5113495bSYour Name  *
1104*5113495bSYour Name  * Return: None
1105*5113495bSYour Name  */
1106*5113495bSYour Name static inline void
1107*5113495bSYour Name qdf_mem_free_sgtable(struct sg_table *sgt)
1108*5113495bSYour Name {
1109*5113495bSYour Name 	__qdf_os_mem_free_sgtable(sgt);
1110*5113495bSYour Name }
1111*5113495bSYour Name 
1112*5113495bSYour Name /**
1113*5113495bSYour Name  * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
1114*5113495bSYour Name  * @sgt: scatter gather table pointer
1115*5113495bSYour Name  *
1116*5113495bSYour Name  * Return: None
1117*5113495bSYour Name  */
1118*5113495bSYour Name static inline void
1119*5113495bSYour Name qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
1120*5113495bSYour Name {
1121*5113495bSYour Name 	__qdf_dma_get_sgtable_dma_addr(sgt);
1122*5113495bSYour Name }
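
/*
 * Example (illustrative sketch): building a scatter-gather table for a
 * consistent DMA buffer, filling in the per-entry DMA addresses, and then
 * releasing the table. dev, vaddr, dma_addr and size are placeholders for
 * values obtained from the caller's allocation.
 *
 *   struct sg_table sgt;
 *
 *   if (qdf_mem_dma_get_sgtable(dev, &sgt, vaddr, dma_addr, size))
 *           return QDF_STATUS_E_FAILURE;
 *   qdf_dma_get_sgtable_dma_addr(&sgt);
 *   // ... hand the sg table off to the consumer ...
 *   qdf_mem_free_sgtable(&sgt);
 */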
1123*5113495bSYour Name 
1124*5113495bSYour Name /**
1125*5113495bSYour Name  * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
1126*5113495bSYour Name  * @osdev: Parent device instance
1127*5113495bSYour Name  * @mem_info: Pointer to allocated memory information
1128*5113495bSYour Name  *
1129*5113495bSYour Name  * Get dma address based on SMMU enablement status. If SMMU Stage 1
1130*5113495bSYour Name  * translation is enabled, DMA APIs return IO virtual address otherwise
1131*5113495bSYour Name  * returns physical address.
1132*5113495bSYour Name  *
1133*5113495bSYour Name  * Return: dma address
1134*5113495bSYour Name  */
1135*5113495bSYour Name static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
1136*5113495bSYour Name 						  qdf_mem_info_t *mem_info)
1137*5113495bSYour Name {
1138*5113495bSYour Name 	return __qdf_mem_get_dma_addr(osdev, mem_info);
1139*5113495bSYour Name }
1140*5113495bSYour Name 
1141*5113495bSYour Name /**
1142*5113495bSYour Name  * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
1143*5113495bSYour Name  * @osdev: Parent device instance
1144*5113495bSYour Name  * @mem_info: Pointer to allocated memory information
1145*5113495bSYour Name  *
1146*5113495bSYour Name  * Based on smmu stage 1 translation enablement, return corresponding dma
1147*5113495bSYour Name  * address storage pointer.
1148*5113495bSYour Name  *
1149*5113495bSYour Name  * Return: dma address storage pointer
1150*5113495bSYour Name  */
1151*5113495bSYour Name static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
1152*5113495bSYour Name 						       qdf_mem_info_t *mem_info)
1153*5113495bSYour Name {
1154*5113495bSYour Name 	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
1155*5113495bSYour Name }
1156*5113495bSYour Name 
1157*5113495bSYour Name 
1158*5113495bSYour Name /**
1159*5113495bSYour Name  * qdf_mem_get_dma_size() - Return DMA memory size
1160*5113495bSYour Name  * @osdev: parent device instance
1161*5113495bSYour Name  * @mem_info: Pointer to allocated memory information
1162*5113495bSYour Name  *
1163*5113495bSYour Name  * Return: DMA memory size
1164*5113495bSYour Name  */
1165*5113495bSYour Name static inline uint32_t
1166*5113495bSYour Name qdf_mem_get_dma_size(qdf_device_t osdev,
1167*5113495bSYour Name 		       qdf_mem_info_t *mem_info)
1168*5113495bSYour Name {
1169*5113495bSYour Name 	return __qdf_mem_get_dma_size(osdev, mem_info);
1170*5113495bSYour Name }
1171*5113495bSYour Name 
1172*5113495bSYour Name /**
1173*5113495bSYour Name  * qdf_mem_set_dma_size() - Set DMA memory size
1174*5113495bSYour Name  * @osdev: parent device instance
1175*5113495bSYour Name  * @mem_info: Pointer to allocated memory information
1176*5113495bSYour Name  * @mem_size: memory size allocated
1177*5113495bSYour Name  *
1178*5113495bSYour Name  * Return: none
1179*5113495bSYour Name  */
1180*5113495bSYour Name static inline void
1181*5113495bSYour Name qdf_mem_set_dma_size(qdf_device_t osdev,
1182*5113495bSYour Name 		       qdf_mem_info_t *mem_info,
1183*5113495bSYour Name 		       uint32_t mem_size)
1184*5113495bSYour Name {
1185*5113495bSYour Name 	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
1186*5113495bSYour Name }
1187*5113495bSYour Name 
1188*5113495bSYour Name /**
1189*5113495bSYour Name  * qdf_mem_get_dma_pa() - Return DMA physical address
1190*5113495bSYour Name  * @osdev: parent device instance
1191*5113495bSYour Name  * @mem_info: Pointer to allocated memory information
1192*5113495bSYour Name  *
1193*5113495bSYour Name  * Return: DMA physical address
1194*5113495bSYour Name  */
1195*5113495bSYour Name static inline qdf_dma_addr_t
1196*5113495bSYour Name qdf_mem_get_dma_pa(qdf_device_t osdev,
1197*5113495bSYour Name 		     qdf_mem_info_t *mem_info)
1198*5113495bSYour Name {
1199*5113495bSYour Name 	return __qdf_mem_get_dma_pa(osdev, mem_info);
1200*5113495bSYour Name }
1201*5113495bSYour Name 
1202*5113495bSYour Name /**
1203*5113495bSYour Name  * qdf_mem_set_dma_pa() - Set DMA physical address
1204*5113495bSYour Name  * @osdev: parent device instance
1205*5113495bSYour Name  * @mem_info: Pointer to allocated memory information
1206*5113495bSYour Name  * @dma_pa: DMA physical address
1207*5113495bSYour Name  *
1208*5113495bSYour Name  * Return: none
1209*5113495bSYour Name  */
1210*5113495bSYour Name static inline void
1211*5113495bSYour Name qdf_mem_set_dma_pa(qdf_device_t osdev,
1212*5113495bSYour Name 		     qdf_mem_info_t *mem_info,
1213*5113495bSYour Name 		     qdf_dma_addr_t dma_pa)
1214*5113495bSYour Name {
1215*5113495bSYour Name 	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
1216*5113495bSYour Name }
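
/*
 * Example (illustrative sketch): populating a qdf_mem_info_t with the size
 * and physical address of a buffer and reading them back. The mem_info,
 * osdev, size and paddr names are assumptions for this example.
 *
 *   qdf_mem_set_dma_size(osdev, &mem_info, size);
 *   qdf_mem_set_dma_pa(osdev, &mem_info, paddr);
 *   size = qdf_mem_get_dma_size(osdev, &mem_info);
 *   paddr = qdf_mem_get_dma_pa(osdev, &mem_info);
 */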
1217*5113495bSYour Name 
1218*5113495bSYour Name /**
1219*5113495bSYour Name  * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
1220*5113495bSYour Name  * @osdev: parent device instance
1221*5113495bSYour Name  * @size: size to be allocated
1222*5113495bSYour Name  *
1223*5113495bSYour Name  * Allocate DMA memory which will be shared with external kernel module. This
1224*5113495bSYour Name  * information is needed for SMMU mapping.
1225*5113495bSYour Name  *
1226*5113495bSYour Name  * Return: Pointer to allocated DMA memory on success, NULL on failure
1227*5113495bSYour Name  */
1228*5113495bSYour Name qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
1229*5113495bSYour Name 
1230*5113495bSYour Name #ifdef DP_UMAC_HW_RESET_SUPPORT
1231*5113495bSYour Name /**
1232*5113495bSYour Name  * qdf_tx_desc_pool_free_bufs() - Go through elems and call the registered cb
1233*5113495bSYour Name  * @ctxt: Context to be passed to the cb
1234*5113495bSYour Name  * @pages: Multi page information storage
1235*5113495bSYour Name  * @elem_size: Each element size
1236*5113495bSYour Name  * @elem_count: Total number of elements in the pool.
1237*5113495bSYour Name  * @cacheable: Coherent memory or cacheable memory
1238*5113495bSYour Name  * @cb: Callback to free the elements
1239*5113495bSYour Name  * @elem_list: elem list for delayed free
1240*5113495bSYour Name  *
1241*5113495bSYour Name  * Return: 0 on success, or error code
1242*5113495bSYour Name  */
1243*5113495bSYour Name int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
1244*5113495bSYour Name 			       uint32_t elem_size, uint32_t elem_count,
1245*5113495bSYour Name 			       uint8_t cacheable, qdf_mem_release_cb cb,
1246*5113495bSYour Name 			       void *elem_list);
1247*5113495bSYour Name #endif
1248*5113495bSYour Name 
1249*5113495bSYour Name /**
1250*5113495bSYour Name  * qdf_mem_shared_mem_free() - Free shared memory
1251*5113495bSYour Name  * @osdev: parent device instance
1252*5113495bSYour Name  * @shared_mem: shared memory information storage
1253*5113495bSYour Name  *
1254*5113495bSYour Name  * Free DMA shared memory resource
1255*5113495bSYour Name  *
1256*5113495bSYour Name  * Return: None
1257*5113495bSYour Name  */
1258*5113495bSYour Name static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
1259*5113495bSYour Name 					   qdf_shared_mem_t *shared_mem)
1260*5113495bSYour Name {
1261*5113495bSYour Name 	if (!shared_mem) {
1262*5113495bSYour Name 		qdf_nofl_err("%s: NULL shared mem struct passed",
1263*5113495bSYour Name 			     __func__);
1264*5113495bSYour Name 		return;
1265*5113495bSYour Name 	}
1266*5113495bSYour Name 
1267*5113495bSYour Name 	if (shared_mem->vaddr) {
1268*5113495bSYour Name 		qdf_mem_free_consistent(osdev, osdev->dev,
1269*5113495bSYour Name 					qdf_mem_get_dma_size(osdev,
1270*5113495bSYour Name 						&shared_mem->mem_info),
1271*5113495bSYour Name 					shared_mem->vaddr,
1272*5113495bSYour Name 					qdf_mem_get_dma_addr(osdev,
1273*5113495bSYour Name 						&shared_mem->mem_info),
1274*5113495bSYour Name 					qdf_get_dma_mem_context(shared_mem,
1275*5113495bSYour Name 								memctx));
1276*5113495bSYour Name 	}
1277*5113495bSYour Name 	qdf_mem_free_sgtable(&shared_mem->sgtable);
1278*5113495bSYour Name 	qdf_mem_free(shared_mem);
1279*5113495bSYour Name }
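
/*
 * Example (illustrative sketch): typical lifetime of a shared DMA buffer.
 * The size, the dma_addr variable and the way the buffer is handed to the
 * external module are assumptions for this example.
 *
 *   qdf_shared_mem_t *shmem = qdf_mem_shared_mem_alloc(osdev, 4096);
 *
 *   if (!shmem)
 *           return QDF_STATUS_E_NOMEM;
 *   dma_addr = qdf_mem_get_dma_addr(osdev, &shmem->mem_info);
 *   // ... share shmem->vaddr / dma_addr with the external module ...
 *   qdf_mem_shared_mem_free(osdev, shmem);
 */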
1280*5113495bSYour Name 
1281*5113495bSYour Name /**
1282*5113495bSYour Name  * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
1283*5113495bSYour Name  * host driver
1284*5113495bSYour Name  *
1285*5113495bSYour Name  * Return: Total DMA memory allocated
1286*5113495bSYour Name  */
1287*5113495bSYour Name int32_t qdf_dma_mem_stats_read(void);
1288*5113495bSYour Name 
1289*5113495bSYour Name /**
1290*5113495bSYour Name  * qdf_heap_mem_stats_read() - Return the heap memory allocated
1291*5113495bSYour Name  * in host driver
1292*5113495bSYour Name  *
1293*5113495bSYour Name  * Return: Total heap memory allocated
1294*5113495bSYour Name  */
1295*5113495bSYour Name int32_t qdf_heap_mem_stats_read(void);
1296*5113495bSYour Name 
1297*5113495bSYour Name /**
1298*5113495bSYour Name  * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
1299*5113495bSYour Name  * host driver
1300*5113495bSYour Name  *
1301*5113495bSYour Name  * Return: Total SKB memory allocated
1302*5113495bSYour Name  */
1303*5113495bSYour Name int32_t qdf_skb_mem_stats_read(void);
1304*5113495bSYour Name 
1305*5113495bSYour Name /**
1306*5113495bSYour Name  * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
1307*5113495bSYour Name  * in the host driver tracked in both debug and perf builds
1308*5113495bSYour Name  *
1309*5113495bSYour Name  * Return: Total SKB memory allocated
1310*5113495bSYour Name  */
1311*5113495bSYour Name int32_t qdf_skb_total_mem_stats_read(void);
1312*5113495bSYour Name 
1313*5113495bSYour Name /**
1314*5113495bSYour Name  * qdf_skb_max_mem_stats_read() - Return the max SKB memory
1315*5113495bSYour Name  * allocated in host driver. This is the high watermark for the
1316*5113495bSYour Name  * total SKB allocated in the host driver
1317*5113495bSYour Name  *
1318*5113495bSYour Name  * Return: Max SKB memory allocated
1319*5113495bSYour Name  */
1320*5113495bSYour Name int32_t qdf_skb_max_mem_stats_read(void);
1321*5113495bSYour Name 
1322*5113495bSYour Name /**
1323*5113495bSYour Name  * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
1324*5113495bSYour Name  * which are waiting on Tx completions
1325*5113495bSYour Name  *
1326*5113495bSYour Name  * Return: Outstanding Tx desc count
1327*5113495bSYour Name  */
1328*5113495bSYour Name int32_t qdf_mem_tx_desc_cnt_read(void);
1329*5113495bSYour Name 
1330*5113495bSYour Name /**
1331*5113495bSYour Name  * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
1332*5113495bSYour Name  * descs which are waiting on Tx completions. This is the high
1333*5113495bSYour Name  * watermark for the pending desc count
1334*5113495bSYour Name  *
1335*5113495bSYour Name  * Return: Max outstanding Tx desc count
1336*5113495bSYour Name  */
1337*5113495bSYour Name int32_t qdf_mem_tx_desc_max_read(void);
1338*5113495bSYour Name 
1339*5113495bSYour Name /**
1340*5113495bSYour Name  * qdf_mem_stats_init() - Initialize the qdf memstats fields on
1341*5113495bSYour Name  * creating the sysfs node
1342*5113495bSYour Name  *
1343*5113495bSYour Name  * Return: None
1344*5113495bSYour Name  */
1345*5113495bSYour Name void qdf_mem_stats_init(void);
1346*5113495bSYour Name 
1347*5113495bSYour Name /**
1348*5113495bSYour Name  * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
1349*5113495bSYour Name  * allocated for Tx data path
1350*5113495bSYour Name  *
1351*5113495bSYour Name  * Return: Tx SKB memory allocated
1352*5113495bSYour Name  */
1353*5113495bSYour Name int32_t qdf_dp_tx_skb_mem_stats_read(void);
1354*5113495bSYour Name 
1355*5113495bSYour Name /**
1356*5113495bSYour Name  * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
1357*5113495bSYour Name  * allocated for Rx data path
1358*5113495bSYour Name  *
1359*5113495bSYour Name  * Return: Rx SKB memory allocated
1360*5113495bSYour Name  */
1361*5113495bSYour Name int32_t qdf_dp_rx_skb_mem_stats_read(void);
1362*5113495bSYour Name 
1363*5113495bSYour Name /**
1364*5113495bSYour Name  * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
1365*5113495bSYour Name  * watermark for the SKB memory allocated for Tx data path
1366*5113495bSYour Name  *
1367*5113495bSYour Name  * Return: Max Tx SKB memory allocated
1368*5113495bSYour Name  */
1369*5113495bSYour Name int32_t qdf_dp_tx_skb_max_mem_stats_read(void);
1370*5113495bSYour Name 
1371*5113495bSYour Name /**
1372*5113495bSYour Name  * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
1373*5113495bSYour Name  * watermark for the SKB memory allocated for Rx data path
1374*5113495bSYour Name  *
1375*5113495bSYour Name  * Return: Max Rx SKB memory allocated
1376*5113495bSYour Name  */
1377*5113495bSYour Name int32_t qdf_dp_rx_skb_max_mem_stats_read(void);
1378*5113495bSYour Name 
1379*5113495bSYour Name /**
1380*5113495bSYour Name  * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
1381*5113495bSYour Name  * allocated in the Tx data path by the host driver or
1382*5113495bSYour Name  * buffers coming from the n/w stack
1383*5113495bSYour Name  *
1384*5113495bSYour Name  * Return: Number of DP Tx buffers allocated
1385*5113495bSYour Name  */
1386*5113495bSYour Name int32_t qdf_mem_dp_tx_skb_cnt_read(void);
1387*5113495bSYour Name 
1388*5113495bSYour Name /**
1389*5113495bSYour Name  * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
1390*5113495bSYour Name  * buffers allocated in the Tx data path
1391*5113495bSYour Name  *
1392*5113495bSYour Name  * Return: Max number of DP Tx buffers allocated
1393*5113495bSYour Name  */
1394*5113495bSYour Name int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);
1395*5113495bSYour Name 
1396*5113495bSYour Name /**
1397*5113495bSYour Name  * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
1398*5113495bSYour Name  * allocated in the Rx data path
1399*5113495bSYour Name  *
1400*5113495bSYour Name  * Return: Number of DP Rx buffers allocated
1401*5113495bSYour Name  */
1402*5113495bSYour Name int32_t qdf_mem_dp_rx_skb_cnt_read(void);
1403*5113495bSYour Name 
1404*5113495bSYour Name /**
1405*5113495bSYour Name  * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
1406*5113495bSYour Name  * buffers allocated in the Rx data path
1407*5113495bSYour Name  *
1408*5113495bSYour Name  * Return: Max number of DP Rx buffers allocated
1409*5113495bSYour Name  */
1410*5113495bSYour Name int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);
1411*5113495bSYour Name 
1412*5113495bSYour Name /**
1413*5113495bSYour Name  * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
1414*5113495bSYour Name  * count and the high watermark for pending tx desc count
1415*5113495bSYour Name  *
1416*5113495bSYour Name  * @pending_tx_descs: outstanding Tx desc count
1417*5113495bSYour Name  * @tx_descs_max: high watermark for outstanding Tx desc count
1418*5113495bSYour Name  *
1419*5113495bSYour Name  * Return: None
1420*5113495bSYour Name  */
1421*5113495bSYour Name void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
1422*5113495bSYour Name 				int32_t tx_descs_max);
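
/*
 * Example (illustrative sketch): dumping a few of the memory counters and
 * refreshing the Tx descriptor watermark. The pool variable, its fields and
 * the use of qdf_nofl_info() for output are assumptions for this example.
 *
 *   qdf_nofl_info("dma=%d heap=%d skb=%d",
 *                 qdf_dma_mem_stats_read(),
 *                 qdf_heap_mem_stats_read(),
 *                 qdf_skb_mem_stats_read());
 *   qdf_mem_tx_desc_cnt_update(pool->pending_descs, pool->max_pending);
 */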
1423*5113495bSYour Name 
1424*5113495bSYour Name /**
1425*5113495bSYour Name  * qdf_mem_vfree() - Free the virtual memory pointed to by ptr
1426*5113495bSYour Name  * @ptr: Pointer to the starting address of the memory to
1427*5113495bSYour Name  * be freed.
1428*5113495bSYour Name  *
1429*5113495bSYour Name  * Return: None
1430*5113495bSYour Name  */
1431*5113495bSYour Name #define qdf_mem_vfree(ptr)   __qdf_mem_vfree(ptr)
1432*5113495bSYour Name 
1433*5113495bSYour Name /**
1434*5113495bSYour Name  * qdf_mem_valloc() - Allocate virtual memory for the given
1435*5113495bSYour Name  * size
1436*5113495bSYour Name  * @size: Number of bytes of memory to be allocated
1437*5113495bSYour Name  *
1438*5113495bSYour Name  * Return: Pointer to the starting address of the allocated virtual memory
1439*5113495bSYour Name  */
1440*5113495bSYour Name #define qdf_mem_valloc(size) __qdf_mem_valloc(size, __func__, __LINE__)
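
/*
 * Example (illustrative sketch): allocating and releasing a large, virtually
 * contiguous buffer. The size and the error handling are arbitrary choices
 * made for this example.
 *
 *   void *buf = qdf_mem_valloc(64 * 1024);
 *
 *   if (!buf)
 *           return QDF_STATUS_E_NOMEM;
 *   // ... use buf ...
 *   qdf_mem_vfree(buf);
 */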
1441*5113495bSYour Name 
1442*5113495bSYour Name #ifdef ENABLE_VALLOC_REPLACE_MALLOC
1443*5113495bSYour Name /**
1444*5113495bSYour Name  * qdf_mem_common_alloc() - Common function to allocate memory for the
1445*5113495bSYour Name  * given size, allocation method decided by ENABLE_VALLOC_REPLACE_MALLOC
1446*5113495bSYour Name  * @size: Number of bytes of memory to be allocated
1447*5113495bSYour Name  *
1448*5113495bSYour Name  * Return: Pointer to the starting address of the allocated memory
1449*5113495bSYour Name  */
1450*5113495bSYour Name #define qdf_mem_common_alloc(size) qdf_mem_valloc(size)
1451*5113495bSYour Name 
1452*5113495bSYour Name /**
1453*5113495bSYour Name  * qdf_mem_common_free() - Common function to free the memory pointed
1454*5113495bSYour Name  * to by ptr, memory free method decided by ENABLE_VALLOC_REPLACE_MALLOC
1455*5113495bSYour Name  * @ptr: Pointer to the starting address of the memory to
1456*5113495bSYour Name  * be freed.
1457*5113495bSYour Name  *
1458*5113495bSYour Name  * Return: None
1459*5113495bSYour Name  */
1460*5113495bSYour Name #define qdf_mem_common_free(ptr) qdf_mem_vfree(ptr)
1461*5113495bSYour Name #else
1462*5113495bSYour Name #define qdf_mem_common_alloc(size) qdf_mem_malloc(size)
1463*5113495bSYour Name #define qdf_mem_common_free(ptr) qdf_mem_free(ptr)
1464*5113495bSYour Name #endif
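
/*
 * Example (illustrative sketch): callers that do not care whether the
 * backing allocation is virtual or heap memory can use the common wrappers
 * and let ENABLE_VALLOC_REPLACE_MALLOC pick the implementation. pool_size is
 * a placeholder for this example.
 *
 *   void *desc_pool = qdf_mem_common_alloc(pool_size);
 *
 *   if (!desc_pool)
 *           return QDF_STATUS_E_NOMEM;
 *   // ... use desc_pool ...
 *   qdf_mem_common_free(desc_pool);
 */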
1465*5113495bSYour Name 
1466*5113495bSYour Name /**
1467*5113495bSYour Name  * qdf_ioremap() - map bus memory into cpu space
1468*5113495bSYour Name  * @HOST_CE_ADDRESS: bus address of the memory
1469*5113495bSYour Name  * @HOST_CE_SIZE: memory size to map
1470*5113495bSYour Name  */
1471*5113495bSYour Name #define qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
1472*5113495bSYour Name 			__qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
1473*5113495bSYour Name 
1474*5113495bSYour Name #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
1475*5113495bSYour Name /*
1476*5113495bSYour Name  * typedef qdf_iommu_domain_t: Platform independent iommu domain
1477*5113495bSYour Name  * abstraction
1478*5113495bSYour Name  */
1479*5113495bSYour Name typedef __qdf_iommu_domain_t qdf_iommu_domain_t;
1480*5113495bSYour Name 
1481*5113495bSYour Name /**
1482*5113495bSYour Name  * qdf_iommu_domain_get_attr() - API to get iommu domain attributes
1483*5113495bSYour Name  * @domain: iommu domain
1484*5113495bSYour Name  * @attr: iommu attribute
1485*5113495bSYour Name  * @data: data pointer
1486*5113495bSYour Name  *
1487*5113495bSYour Name  * Return: 0 on success, else errno
1488*5113495bSYour Name  */
1489*5113495bSYour Name int
1490*5113495bSYour Name qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
1491*5113495bSYour Name 			  enum qdf_iommu_attr attr, void *data);
1492*5113495bSYour Name #endif
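
/*
 * Example (illustrative sketch): querying an attribute of an iommu domain.
 * The attribute name QDF_DOMAIN_ATTR_S1_BYPASS and the bypass/smmu_s1_active
 * variables are assumptions for this example; the valid attributes are those
 * defined by enum qdf_iommu_attr.
 *
 *   int bypass = 0;
 *
 *   if (!qdf_iommu_domain_get_attr(domain, QDF_DOMAIN_ATTR_S1_BYPASS,
 *                                  &bypass))
 *           smmu_s1_active = !bypass;
 */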
1493*5113495bSYour Name 
1494*5113495bSYour Name #define DEFAULT_DEBUG_DOMAIN_INIT 0
1495*5113495bSYour Name #ifdef QCA_DMA_PADDR_CHECK
1496*5113495bSYour Name /**
1497*5113495bSYour Name  * qdf_dma_invalid_buf_list_init() - Initialize dma invalid buffer list
1498*5113495bSYour Name  *
1499*5113495bSYour Name  * Return: none
1500*5113495bSYour Name  */
1501*5113495bSYour Name void qdf_dma_invalid_buf_list_init(void);
1502*5113495bSYour Name 
1503*5113495bSYour Name /**
1504*5113495bSYour Name  * qdf_dma_invalid_buf_list_deinit() - Deinitialize dma invalid buffer list
1505*5113495bSYour Name  *
1506*5113495bSYour Name  * Return: none
1507*5113495bSYour Name  */
1508*5113495bSYour Name void qdf_dma_invalid_buf_list_deinit(void);
1509*5113495bSYour Name 
1510*5113495bSYour Name /**
1511*5113495bSYour Name  * qdf_dma_invalid_buf_free() - Free dma invalid buffer
1512*5113495bSYour Name  * @dev: Pointer to device handle
1513*5113495bSYour Name  * @domain: Debug domain
1514*5113495bSYour Name  *
1515*5113495bSYour Name  * Return: none
1516*5113495bSYour Name  */
1517*5113495bSYour Name void qdf_dma_invalid_buf_free(void *dev, uint8_t domain);
1518*5113495bSYour Name #else
1519*5113495bSYour Name static inline void
1520*5113495bSYour Name qdf_dma_invalid_buf_list_init(void)
1521*5113495bSYour Name {
1522*5113495bSYour Name }
1523*5113495bSYour Name 
1524*5113495bSYour Name static inline void
1525*5113495bSYour Name qdf_dma_invalid_buf_list_deinit(void)
1526*5113495bSYour Name {
1527*5113495bSYour Name }
1528*5113495bSYour Name 
1529*5113495bSYour Name static inline void
1530*5113495bSYour Name qdf_dma_invalid_buf_free(void *dev, uint8_t domain)
1531*5113495bSYour Name {
1532*5113495bSYour Name }
1533*5113495bSYour Name #endif /* QCA_DMA_PADDR_CHECK */
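
/*
 * Example (illustrative sketch): expected ordering of the dma invalid buffer
 * list helpers over the driver lifetime. The load/unload context and the dev
 * pointer are assumptions for this example; DEFAULT_DEBUG_DOMAIN_INIT is the
 * default debug domain defined above.
 *
 *   qdf_dma_invalid_buf_list_init();                    // driver load
 *   // ... on recovery or teardown of a given debug domain ...
 *   qdf_dma_invalid_buf_free(dev, DEFAULT_DEBUG_DOMAIN_INIT);
 *   qdf_dma_invalid_buf_list_deinit();                  // driver unload
 */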
1534*5113495bSYour Name #endif /* __QDF_MEMORY_H */