1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: qdf_mem
22  * QCA driver framework (QDF) memory management APIs
23  */
24 
25 #if !defined(__QDF_MEMORY_H)
26 #define __QDF_MEMORY_H
27 
28 /* Include Files */
29 #include <qdf_types.h>
30 #include <i_qdf_mem.h>
31 #include <i_qdf_trace.h>
32 #include <qdf_atomic.h>
33 
34 #define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
35 
36 /**
37  * qdf_align() - align to the given size.
38  * @a: input that needs to be aligned.
39  * @align_size: boundary on which 'a' has to be aligned.
40  *
41  * Return: aligned value.
42  */
43 #define qdf_align(a, align_size)   __qdf_align(a, align_size)
44 #define qdf_page_size __page_size
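
/*
 * Example (illustrative sketch; 'len' is a hypothetical buffer length):
 * round a length up to the cache line size with qdf_align().
 *
 *	len = qdf_align(len, QDF_CACHE_LINE_SZ);
 */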
45 
46 /**
47  * struct qdf_mem_dma_page_t - Allocated dmaable page
48  * @page_v_addr_start: Page start virtual address
49  * @page_v_addr_end: Page end virtual address
50  * @page_p_addr: Page start physical address
51  */
52 struct qdf_mem_dma_page_t {
53 	char *page_v_addr_start;
54 	char *page_v_addr_end;
55 	qdf_dma_addr_t page_p_addr;
56 };
57 
58 /**
59  * struct qdf_mem_multi_page_t - multiple page allocation information storage
60  * @num_element_per_page: Number of elements in a single page
61  * @num_pages: Number of pages needed for the allocation
62  * @dma_pages: page information storage in case of coherent memory
63  * @cacheable_pages: page information storage in case of cacheable memory
64  * @page_size: page size
65  * @is_mem_prealloc: flag indicating whether the multi-page memory is pre-allocated
66  * @contiguous_dma_pages: flag indicating whether the DMA pages are contiguous
67  */
68 struct qdf_mem_multi_page_t {
69 	uint16_t num_element_per_page;
70 	uint16_t num_pages;
71 	struct qdf_mem_dma_page_t *dma_pages;
72 	void **cacheable_pages;
73 	qdf_size_t page_size;
74 #ifdef DP_MEM_PRE_ALLOC
75 	uint8_t is_mem_prealloc;
76 #endif
77 #ifdef ALLOC_CONTIGUOUS_MULTI_PAGE
78 	bool contiguous_dma_pages;
79 #endif
80 };
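
/*
 * Illustrative sketch (assumes cacheable pages; 'elem_idx' and 'elem_size'
 * are hypothetical): locate one element inside a qdf_mem_multi_page_t by
 * selecting the page first, then the offset within that page.
 *
 *	page = pages->cacheable_pages[elem_idx / pages->num_element_per_page];
 *	elem = (uint8_t *)page +
 *	       (elem_idx % pages->num_element_per_page) * elem_size;
 */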
81 
82 
83 /* Preprocessor definitions and constants */
84 
85 typedef __qdf_mempool_t qdf_mempool_t;
86 
87 /**
88  * qdf_mem_init() - Initialize QDF memory module
89  *
90  * Return: None
91  *
92  */
93 void qdf_mem_init(void);
94 
95 /**
96  * qdf_mem_exit() - Exit QDF memory module
97  *
98  * Return: None
99  *
100  */
101 void qdf_mem_exit(void);
102 
103 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
104 #define qdf_untracked_mem_malloc(size) \
105 	__qdf_untracked_mem_malloc(size, __func__, __LINE__)
106 
107 #define qdf_untracked_mem_free(ptr) \
108 	__qdf_untracked_mem_free(ptr)
109 #endif
110 
111 #define QDF_MEM_FUNC_NAME_SIZE 48
112 
113 #ifdef MEMORY_DEBUG
114 /**
115  * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
116  *
117  * Return: value of mem_debug_disabled qdf module argument
118  */
119 bool qdf_mem_debug_config_get(void);
120 
121 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
122 /**
123  * qdf_mem_debug_disabled_config_set() - Set mem_debug_disabled
124  * @str_value: value of the module param
125  *
126  * This function will set qdf module param mem_debug_disabled
127  *
128  * Return: QDF_STATUS_SUCCESS on Success
129  */
130 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value);
131 #endif
132 
133 /**
134  * qdf_mem_malloc_atomic_debug() - debug version of QDF memory allocation API
135  * @size: Number of bytes of memory to allocate.
136  * @func: Function name of the call site
137  * @line: Line number of the call site
138  * @caller: Address of the caller function
139  *
140  * This function will dynamically allocate the specified number of bytes of
141  * memory and add it to the qdf tracking list to check for memory leaks and
142  * corruptions
143  *
144  * Return: A valid memory location on success, or NULL on failure
145  */
146 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
147 				  uint32_t line, void *caller);
148 
149 /**
150  * qdf_mem_malloc_atomic_debug_fl() - allocate QDF memory atomically
151  * @size: Number of bytes of memory to allocate.
152  * @func: Function name of the call site
153  * @line: Line number of the call site
154  *
155  * This function will dynamically allocate the specified number of bytes of
156  * memory.
157  *
158  * Return:
159  * Upon successful allocation, returns a non-NULL pointer to the allocated
160  * memory.  If this function is unable to allocate the amount of memory
161  * specified (for any reason) it returns NULL.
162  */
163 void *qdf_mem_malloc_atomic_debug_fl(qdf_size_t size, const char *func,
164 				     uint32_t line);
165 
166 /**
167  * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
168  * @size: Number of bytes of memory to allocate.
169  * @func: Function name of the call site
170  * @line: Line number of the call site
171  * @caller: Address of the caller function
172  * @flag: GFP flag
173  *
174  * This function will dynamically allocate the specified number of bytes of
175  * memory and add it to the qdf tracking list to check for memory leaks and
176  * corruptions
177  *
178  * Return: A valid memory location on success, or NULL on failure
179  */
180 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
181 			   void *caller, uint32_t flag);
182 
183 #define qdf_mem_malloc(size) \
184 	qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)
185 
186 #define qdf_mem_malloc_fl(size, func, line) \
187 	qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)
188 
189 #define qdf_mem_malloc_atomic(size) \
190 	qdf_mem_malloc_atomic_debug(size, __func__, __LINE__, QDF_RET_IP)
191 
192 /**
193  * qdf_mem_free() - free allocated memory
194  * @ptr: Pointer to the starting address of the memory to be freed.
195  *
196  * This function will free the memory pointed to by 'ptr'. It also checks for
197  * memory corruption, underrun, overrun, double free, domain mismatch, etc.
198  *
199  * Return: none
200  */
201 #define qdf_mem_free(ptr) \
202 	qdf_mem_free_debug(ptr, __func__, __LINE__)
203 void qdf_mem_free_debug(void *ptr, const char *file, uint32_t line);
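
/*
 * Typical usage (illustrative sketch; 'len' and the error handling are
 * hypothetical):
 *
 *	buf = qdf_mem_malloc(len);
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... use buf ...
 *
 *	qdf_mem_free(buf);
 */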
204 
205 /**
206  * qdf_mem_multi_pages_alloc_debug() - Debug version of
207  * qdf_mem_multi_pages_alloc
208  * @osdev: OS device handle pointer
209  * @pages: Multi page information storage
210  * @element_size: Each element size
211  * @element_num: Total number of elements to be allocated
212  * @memctxt: Memory context
213  * @cacheable: Coherent memory or cacheable memory
214  * @func: Caller of this allocator
215  * @line: Line number of the caller
216  * @caller: Return address of the caller
217  *
218  * This function will allocate a large amount of memory over multiple pages.
219  * Because large contiguous allocations fail frequently, the memory is
220  * allocated as multiple non-contiguous pages and combined at the point of
221  * actual use, instead of being requested in one shot.
222  *
223  * Return: None
224  */
225 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
226 				     struct qdf_mem_multi_page_t *pages,
227 				     size_t element_size, uint32_t element_num,
228 				     qdf_dma_context_t memctxt, bool cacheable,
229 				     const char *func, uint32_t line,
230 				     void *caller);
231 
232 /**
233  * qdf_mem_multi_pages_alloc() - allocate a large amount of kernel memory
234  * @osdev: OS device handle pointer
235  * @pages: Multi page information storage
236  * @element_size: Each element size
237  * @element_num: Total number of elements to be allocated
238  * @memctxt: Memory context
239  * @cacheable: Coherent memory or cacheable memory
240  *
241  * This function will allocate a large amount of memory over multiple pages.
242  * Because large contiguous allocations fail frequently, the memory is
243  * allocated as multiple non-contiguous pages and combined at the point of
244  * actual use, instead of being requested in one shot.
245  *
246  * Return: None
247  */
248 #define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
249 				  memctxt, cacheable) \
250 	qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
251 					element_num, memctxt, cacheable, \
252 					__func__, __LINE__, QDF_RET_IP)
253 
254 /**
255  * qdf_mem_multi_pages_free_debug() - Debug version of qdf_mem_multi_pages_free
256  * @osdev: OS device handle pointer
257  * @pages: Multi page information storage
258  * @memctxt: Memory context
259  * @cacheable: Coherent memory or cacheable memory
260  * @func: Caller of this allocator
261  * @line: Line number of the caller
262  *
263  * This function will free a large, multi-page memory allocation.
264  *
265  * Return: None
266  */
267 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
268 				    struct qdf_mem_multi_page_t *pages,
269 				    qdf_dma_context_t memctxt, bool cacheable,
270 				    const char *func, uint32_t line);
271 
272 /**
273  * qdf_mem_multi_pages_free() - free a large amount of kernel memory
274  * @osdev: OS device handle pointer
275  * @pages: Multi page information storage
276  * @memctxt: Memory context
277  * @cacheable: Coherent memory or cacheable memory
278  *
279  * This function will free a large, multi-page memory allocation.
280  *
281  * Return: None
282  */
283 #define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
284 	qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
285 				       __func__, __LINE__)
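
/*
 * Typical usage (illustrative sketch; the descriptor pool, sizes and the
 * failure check are hypothetical):
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pool->desc_pages, desc_size,
 *				  num_desc, memctx, true);
 *	if (!pool->desc_pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... use the pool ...
 *
 *	qdf_mem_multi_pages_free(osdev, &pool->desc_pages, memctx, true);
 */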
286 
287 /**
288  * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
289  *
290  * Call this to ensure there are no active memory allocations being tracked
291  * against the current debug domain. For example, one should call this function
292  * immediately before a call to qdf_debug_domain_set() as a memory leak
293  * detection mechanism.
294  *
295  * e.g.
296  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
297  *
298  *	...
299  *
300  *	// memory is allocated and freed
301  *
302  *	...
303  *
304  *	// before transitioning back to inactive state,
305  *	// make sure all active memory has been freed
306  *	qdf_mem_check_for_leaks();
307  *	qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
308  *
309  *	...
310  *
311  *	// also, before program exit, make sure init time memory is freed
312  *	qdf_mem_check_for_leaks();
313  *	exit();
314  *
315  * Return: None
316  */
317 void qdf_mem_check_for_leaks(void);
318 
319 /**
320  * qdf_mem_alloc_consistent() - allocates consistent qdf memory
321  * @osdev: OS device handle
322  * @dev: Pointer to device handle
323  * @size: Size to be allocated
324  * @paddr: Physical address
325  *
326  * Return: pointer to the allocated memory, or NULL if the allocation fails
327  */
328 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
329 	qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
330 				       __func__, __LINE__, QDF_RET_IP)
331 void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
332 				     qdf_size_t size, qdf_dma_addr_t *paddr,
333 				     const char *func, uint32_t line,
334 				     void *caller);
335 
336 /**
337  * qdf_mem_free_consistent() - free consistent qdf memory
338  * @osdev: OS device handle
339  * @dev: OS device
340  * @size: Size of the memory to be freed
341  * @vaddr: virtual address
342  * @paddr: Physical address
343  * @memctx: Pointer to DMA context
344  *
345  * Return: none
346  */
347 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
348 	qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
349 				  __func__, __LINE__)
350 void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
351 				   qdf_size_t size, void *vaddr,
352 				   qdf_dma_addr_t paddr,
353 				   qdf_dma_context_t memctx,
354 				   const char *func, uint32_t line);
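
/*
 * Typical usage (illustrative sketch; 'ring' and its fields are
 * hypothetical, and 0 is passed as the DMA context):
 *
 *	ring->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
 *					       ring->size, &ring->paddr);
 *	if (!ring->vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... hand ring->paddr to the hardware ...
 *
 *	qdf_mem_free_consistent(osdev, osdev->dev, ring->size,
 *				ring->vaddr, ring->paddr, 0);
 */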
355 
356 #else
357 static inline bool qdf_mem_debug_config_get(void)
358 {
359 	return false;
360 }
361 
362 static inline
363 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
364 {
365 	return QDF_STATUS_SUCCESS;
366 }
367 
368 /**
369  * qdf_mem_malloc() - allocate QDF memory
370  * @size: Number of bytes of memory to allocate.
371  *
372  * This function will dynamically allocate the specified number of bytes of
373  * memory.
374  *
375  * Return:
376  * Upon successful allocation, returns a non-NULL pointer to the allocated
377  * memory.  If this function is unable to allocate the amount of memory
378  * specified (for any reason) it returns NULL.
379  */
380 #define qdf_mem_malloc(size) \
381 	__qdf_mem_malloc(size, __func__, __LINE__)
382 
383 #define qdf_mem_malloc_fl(size, func, line) \
384 	__qdf_mem_malloc(size, func, line)
385 
386 /**
387  * qdf_mem_malloc_atomic() - allocate QDF memory atomically
388  * @size: Number of bytes of memory to allocate.
389  *
390  * This function will dynamically allocate the specified number of bytes of
391  * memory.
392  *
393  * Return:
394  * Upon successful allocation, returns a non-NULL pointer to the allocated
395  * memory.  If this function is unable to allocate the amount of memory
396  * specified (for any reason) it returns NULL.
397  */
398 #define qdf_mem_malloc_atomic(size) \
399 	qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)
400 
401 void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
402 			       const char *func,
403 			       uint32_t line);
404 
405 #define qdf_mem_free(ptr) \
406 	__qdf_mem_free(ptr)
407 
408 static inline void qdf_mem_check_for_leaks(void) { }
409 
410 #define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
411 	__qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)
412 
413 #define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
414 	__qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)
415 
416 void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
417 			       struct qdf_mem_multi_page_t *pages,
418 			       size_t element_size, uint32_t element_num,
419 			       qdf_dma_context_t memctxt, bool cacheable);
420 
421 void qdf_mem_multi_pages_free(qdf_device_t osdev,
422 			      struct qdf_mem_multi_page_t *pages,
423 			      qdf_dma_context_t memctxt, bool cacheable);
424 
425 #endif /* MEMORY_DEBUG */
426 
427 /**
428  * qdf_mem_malloc_flags() - Get memory allocation flags
429  *
430  * Return the flag to be used for memory allocation
431  * based on the calling context
432  *
433  * Return: Based on the context, returns the GFP flag
434  * for memory allocation
435  */
436 int qdf_mem_malloc_flags(void);
437 
438 /**
439  * qdf_prealloc_disabled_config_get() - Get the user configuration of
440  *                                      prealloc_disabled
441  *
442  * Return: value of prealloc_disabled qdf module argument
443  */
444 bool qdf_prealloc_disabled_config_get(void);
445 
446 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
447 /**
448  * qdf_prealloc_disabled_config_set() - Set prealloc_disabled
449  * @str_value: value of the module param
450  *
451  * This function will set qdf module param prealloc_disabled
452  *
453  * Return: QDF_STATUS_SUCCESS on Success
454  */
455 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value);
456 #endif
457 
458 /**
459  * qdf_mem_multi_pages_zero() - zero out each page memory
460  * @pages: Multi page information storage
461  * @cacheable: Coherent memory or cacheable memory
462  *
463  * This function will zero out each page memory
464  *
465  * Return: None
466  */
467 void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
468 			      bool cacheable);
469 
470 /**
471  * qdf_aligned_malloc() - allocates aligned QDF memory.
472  * @size: Size to be allocated
473  * @vaddr_unaligned: Unaligned virtual address.
474  * @paddr_unaligned: Unaligned physical address.
475  * @paddr_aligned: Aligned physical address.
476  * @align: Base address alignment.
477  *
478  * This function will dynamically allocate the specified number of bytes of
479  * memory. Checks if the allocated base address is aligned to @align.
480  * If not, it frees the allocated memory, adds @align to the allocation
481  * size and re-allocates the memory.
482  *
483  * Return:
484  * Upon successful allocation, returns an aligned base address of the allocated
485  * memory.  If this function is unable to allocate the amount of memory
486  * specified (for any reason) it returns NULL.
487  */
488 #define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
489 			   paddr_aligned, align) \
490 	qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
491 			   paddr_aligned, align, __func__, __LINE__)
492 
493 void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
494 			    qdf_dma_addr_t *paddr_unaligned,
495 			    qdf_dma_addr_t *paddr_aligned,
496 			    uint32_t align,
497 			    const char *func, uint32_t line);
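
/*
 * Illustrative sketch (the ring fields and the 8-byte alignment are
 * hypothetical):
 *
 *	ring->base_vaddr_aligned =
 *		qdf_aligned_malloc(&ring->alloc_size,
 *				   &ring->base_vaddr_unaligned,
 *				   &ring->base_paddr_unaligned,
 *				   &ring->base_paddr_aligned, 8);
 *	if (!ring->base_vaddr_aligned)
 *		return QDF_STATUS_E_NOMEM;
 */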
498 
499 /**
500  * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
501  * @osdev: OS device handle
502  * @size: Size to be allocated
503  * @vaddr_unaligned: Unaligned virtual address.
504  * @paddr_unaligned: Unaligned physical address.
505  * @paddr_aligned: Aligned physical address.
506  * @align: Base address alignment.
507  *
508  * Return: pointer to the allocated memory, or NULL if the allocation fails.
509  */
510 #define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
511 					 paddr_unaligned, paddr_aligned, \
512 					 align) \
513 	qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
514 					    paddr_unaligned, paddr_aligned, \
515 					    align, __func__, __LINE__)
516 
517 void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
518 					  void **vaddr_unaligned,
519 					  qdf_dma_addr_t *paddr_unaligned,
520 					  qdf_dma_addr_t *paddr_aligned,
521 					  uint32_t align, const char *func,
522 					  uint32_t line);
523 
524 /**
525  * qdf_mem_virt_to_phys() - Convert virtual address to physical
526  * @vaddr: virtual address
527  *
528  * Return: physical address
529  */
530 #define qdf_mem_virt_to_phys(vaddr) __qdf_mem_virt_to_phys(vaddr)
531 
532 /**
533  * qdf_mem_set_io() - set (fill) memory with a specified byte value.
534  * @ptr: Pointer to memory that will be set
535  * @num_bytes: Number of bytes to be set
536  * @value: Byte value to set in memory
537  *
538  * Return: None
539  */
540 void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);
541 
542 /**
543  * qdf_mem_copy_toio() - copy memory
544  * @dst_addr: Pointer to destination memory location (to copy to)
545  * @src_addr: Pointer to source memory location (to copy from)
546  * @num_bytes: Number of bytes to copy.
547  *
548  * Return: none
549  */
550 void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
551 					   uint32_t num_bytes);
552 
553 /**
554  * qdf_mem_set() - set (fill) memory with a specified byte value.
555  * @ptr: Pointer to memory that will be set
556  * @num_bytes: Number of bytes to be set
557  * @value: Byte value to set in memory
558  *
559  * WARNING: parameter @num_bytes and @value are swapped comparing with
560  * standard C function "memset", please ensure correct usage of this function!
561  *
562  * Return: None
563  */
564 void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
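
/*
 * Note the argument order relative to standard memset() (illustrative
 * sketch; 'buf' is hypothetical):
 *
 *	memset(buf, 0xff, sizeof(buf));       (standard C)
 *	qdf_mem_set(buf, sizeof(buf), 0xff);  (qdf equivalent)
 */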
565 
566 /**
567  * qdf_mem_zero() - zero out memory
568  * @ptr: pointer to memory that will be set to zero
569  * @num_bytes: number of bytes to zero
570  *
571  * This function sets the memory location to all zeros, essentially clearing
572  * the memory.
573  *
574  * Return: None
575  */
576 static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
577 {
578 	qdf_mem_set(ptr, num_bytes, 0);
579 }
580 
581 /**
582  * qdf_mem_copy() - copy memory
583  * @dst_addr: Pointer to destination memory location (to copy to)
584  * @src_addr: Pointer to source memory location (to copy from)
585  * @num_bytes: Number of bytes to copy.
586  *
587  * Copy host memory from one location to another, similar to memcpy in
588  * standard C.  Note this function does not specifically handle overlapping
589  * source and destination memory locations.  Calling this function with
590  * overlapping source and destination memory locations will result in
591  * unpredictable results.  Use qdf_mem_move() if the memory locations
592  * for the source and destination are overlapping (or could be overlapping!)
593  *
594  * Return: none
595  */
596 void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
597 
598 /**
599  * qdf_mem_move() - move memory
600  * @dst_addr: pointer to destination memory location (to move to)
601  * @src_addr: pointer to source memory location (to move from)
602  * @num_bytes: number of bytes to move.
603  *
604  * Move host memory from one location to another, similar to memmove in
605  * standard C.  Note this function *does* handle overlapping
606  * source and destination memory locations.
607  *
608  * Return: None
609  */
610 void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
611 
612 /**
613  * qdf_mem_cmp() - memory compare
614  * @left: pointer to one location in memory to compare
615  * @right: pointer to second location in memory to compare
616  * @size: the number of bytes to compare
617  *
618  * Function to compare two pieces of memory, similar to memcmp function
619  * in standard C.
620  *
621  * Return:
622  *	0 -- equal
623  *	< 0 -- *left is less than *right
624  *	> 0 -- *left is greater than *right
625  */
626 int qdf_mem_cmp(const void *left, const void *right, size_t size);
627 
628 /**
629  * qdf_ether_addr_copy() - copy an Ethernet address
630  * @dst_addr: A six-byte array Ethernet address destination
631  * @src_addr: A six-byte array Ethernet address source
632  *
633  * Please note: dst & src must both be aligned to u16.
634  *
635  * Return: none
636  */
637 void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
638 
639 /**
640  * qdf_mem_map_nbytes_single() - Map memory for DMA
641  * @osdev: pointer to OS device context
642  * @buf: pointer to memory to be dma mapped
643  * @dir: DMA map direction
644  * @nbytes: number of bytes to be mapped.
645  * @phy_addr: pointer to receive physical address.
646  *
647  * Return: success/failure
648  */
649 static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
650 						 qdf_dma_dir_t dir, int nbytes,
651 						 qdf_dma_addr_t *phy_addr)
652 {
653 #if defined(HIF_PCI) || defined(HIF_IPCI)
654 	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
655 #else
656 	return 0;
657 #endif
658 }
659 
660 static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
661 					  qdf_dma_addr_t buf,
662 					  qdf_dma_dir_t dir,
663 					  int nbytes)
664 {
665 	__qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
666 }
667 
668 /**
669  * qdf_mem_unmap_nbytes_single() - unmap memory for DMA
670  * @osdev: pointer to OS device context
671  * @phy_addr: physical address of memory to be dma unmapped
672  * @dir: DMA unmap direction
673  * @nbytes: number of bytes to be unmapped.
674  *
675  * Return: none
676  */
677 static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
678 					       qdf_dma_addr_t phy_addr,
679 					       qdf_dma_dir_t dir,
680 					       int nbytes)
681 {
682 #if defined(HIF_PCI) || defined(HIF_IPCI)
683 	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
684 #endif
685 }
686 
687 /**
688  * qdf_mempool_init() - Create and initialize memory pool
689  * @osdev: platform device object
690  * @pool_addr: address of the pool created
691  * @elem_cnt: no. of elements in pool
692  * @elem_size: size of each pool element in bytes
693  * @flags: flags
694  * Return: 0 on success, else error value
695  */
696 static inline int qdf_mempool_init(qdf_device_t osdev,
697 				   qdf_mempool_t *pool_addr, int elem_cnt,
698 				   size_t elem_size, uint32_t flags)
699 {
700 	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
701 				  flags);
702 }
703 
704 /**
705  * qdf_mempool_destroy() - Destroy memory pool
706  * @osdev: platform device object
707  * @pool: Handle to memory pool
708  *
709  * Return: none
710  */
711 static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
712 {
713 	__qdf_mempool_destroy(osdev, pool);
714 }
715 
716 /**
717  * qdf_mempool_alloc() - Allocate an element from the memory pool
718  * @osdev: platform device object
719  * @pool: Handle to memory pool
720  *
721  * Return: Pointer to the allocated element or NULL if the pool is empty
722  */
723 static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
724 {
725 	return (void *)__qdf_mempool_alloc(osdev, pool);
726 }
727 
728 /**
729  * qdf_mempool_free() - Free a memory pool element
730  * @osdev: Platform device object
731  * @pool: Handle to memory pool
732  * @buf: Element to be freed
733  *
734  * Return: none
735  */
736 static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
737 				    void *buf)
738 {
739 	__qdf_mempool_free(osdev, pool, buf);
740 }
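
/*
 * Typical pool lifecycle (illustrative sketch; the element count/size are
 * hypothetical and a non-zero return from qdf_mempool_init() is treated
 * as failure):
 *
 *	if (qdf_mempool_init(osdev, &pool, 64, elem_size, 0))
 *		return QDF_STATUS_E_NOMEM;
 *
 *	elem = qdf_mempool_alloc(osdev, pool);
 *	if (elem)
 *		qdf_mempool_free(osdev, pool, elem);
 *
 *	qdf_mempool_destroy(osdev, pool);
 */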
741 
742 /**
743  * qdf_kmem_cache_create() - OS abstraction for cache creation
744  * @c: Cache name
745  * @z: Size of the object to be created
746  *
747  * Return: Cache address on successful creation, else NULL
748  */
749 #ifdef QCA_KMEM_CACHE_SUPPORT
750 #define qdf_kmem_cache_create(c, z) __qdf_kmem_cache_create(c, z)
751 #else
752 #define qdf_kmem_cache_create(c, z) NULL
753 #endif
754 
755 /**
756  * qdf_kmem_cache_destroy() - OS abstraction for cache destruction
757  * @cache: Cache pointer
758  *
759  * Return: void
760  */
761 static inline void qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
762 {
763 	__qdf_kmem_cache_destroy(cache);
764 }
765 
766 /**
767  * qdf_kmem_cache_alloc() - Function to allocate an object from a cache
768  * @cache: Cache address
769  *
770  * Return: Object from cache
771  *
772  */
773 static inline void *qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
774 {
775 	return __qdf_kmem_cache_alloc(cache);
776 }
777 
778 /**
779  * qdf_kmem_cache_free() - Function to free cache object
780  * @cache: Cache address
781  * @node: Object to be returned to cache
782  *
783  * Return: void
784  */
785 static inline void qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
786 {
787 	__qdf_kmem_cache_free(cache, node);
788 }
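
/*
 * Typical cache lifecycle (illustrative sketch; the cache name and object
 * type are hypothetical):
 *
 *	cache = qdf_kmem_cache_create("my_obj_cache", sizeof(struct my_obj));
 *	if (!cache)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	obj = qdf_kmem_cache_alloc(cache);
 *	if (obj)
 *		qdf_kmem_cache_free(cache, obj);
 *
 *	qdf_kmem_cache_destroy(cache);
 */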
789 
790 /**
791  * qdf_mem_dma_sync_single_for_device() - assign memory to device
792  * @osdev: OS device handle
793  * @bus_addr: dma address to give to the device
794  * @size: Size of the memory block
795  * @direction: direction data will be DMAed
796  *
797  * Assign memory to the remote device.
798  * The cache lines are flushed to ram or invalidated as needed.
799  *
800  * Return: none
801  */
802 void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
803 					qdf_dma_addr_t bus_addr,
804 					qdf_size_t size,
805 					__dma_data_direction direction);
806 
807 /**
808  * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
809  * @osdev: OS device handle
810  * @bus_addr: dma address to give to the cpu
811  * @size: Size of the memory block
812  * @direction: direction data will be DMAed
813  *
814  * Assign memory to the CPU.
815  *
816  * Return: none
817  */
818 void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
819 					qdf_dma_addr_t bus_addr,
820 					qdf_size_t size,
821 					__dma_data_direction direction);
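
/*
 * Illustrative sketch (buffer variables are hypothetical and 'dir' is the
 * __dma_data_direction used when the buffer was mapped): reclaim a buffer
 * for the CPU before reading DMA-written data, then hand it back to the
 * device.
 *
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, buf_len, dir);
 *	... CPU reads the DMA-written data ...
 *	qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, buf_len, dir);
 */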
822 
823 /**
824  * qdf_mem_multi_page_link() - Make links for multi page elements
825  * @osdev: OS device handle pointer
826  * @pages: Multi page information storage
827  * @elem_size: Single element size
828  * @elem_count: Number of elements to be linked
829  * @cacheable: Coherent memory or cacheable memory
830  *
831  * This function will make links for multi page allocated structure
832  *
833  * Return: 0 on success
834  */
835 int qdf_mem_multi_page_link(qdf_device_t osdev,
836 			    struct qdf_mem_multi_page_t *pages,
837 			    uint32_t elem_size, uint32_t elem_count,
838 			    uint8_t cacheable);
839 
840 /**
841  * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
842  * @size: number of bytes to increment by
843  *
844  * Return: None
845  */
846 void qdf_mem_kmalloc_inc(qdf_size_t size);
847 
848 /**
849  * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
850  * @size: number of bytes to decrement by
851  *
852  * Return: None
853  */
854 void qdf_mem_kmalloc_dec(qdf_size_t size);
855 
856 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
857 /**
858  * qdf_mem_skb_inc() - increment total skb allocation size
859  * @size: size to be added
860  *
861  * Return: none
862  */
863 void qdf_mem_skb_inc(qdf_size_t size);
864 
865 /**
866  * qdf_mem_skb_dec() - decrement total skb allocation size
867  * @size: size to be decremented
868  *
869  * Return: none
870  */
871 void qdf_mem_skb_dec(qdf_size_t size);
872 
873 /**
874  * qdf_mem_skb_total_inc() - increment total skb allocation size
875  * in host driver in both debug and perf builds
876  * @size: size to be added
877  *
878  * Return: none
879  */
880 void qdf_mem_skb_total_inc(qdf_size_t size);
881 
882 /**
883  * qdf_mem_skb_total_dec() - decrement total skb allocation size
884  * in the host driver in debug and perf flavors
885  * @size: size to be decremented
886  *
887  * Return: none
888  */
889 void qdf_mem_skb_total_dec(qdf_size_t size);
890 
891 /**
892  * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
893  * @size: size to be added
894  *
895  * Return: none
896  */
897 void qdf_mem_dp_tx_skb_inc(qdf_size_t size);
898 
899 /**
900  * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
901  * @size: size to be decreased
902  *
903  * Return: none
904  */
905 void qdf_mem_dp_tx_skb_dec(qdf_size_t size);
906 
907 /**
908  * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
909  * @size: size to be added
910  *
911  * Return: none
912  */
913 void qdf_mem_dp_rx_skb_inc(qdf_size_t size);
914 
915 /**
916  * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
917  * @size: size to be decreased
918  *
919  * Return: none
920  */
921 void qdf_mem_dp_rx_skb_dec(qdf_size_t size);
922 
923 /**
924  * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
925  *
926  * Return: none
927  */
928 void qdf_mem_dp_tx_skb_cnt_inc(void);
929 
930 /**
931  * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
932  *
933  * Return: none
934  */
935 void qdf_mem_dp_tx_skb_cnt_dec(void);
936 
937 /**
938  * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
939  *
940  * Return: none
941  */
942 void qdf_mem_dp_rx_skb_cnt_inc(void);
943 
944 /**
945  * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
946  *
947  * Return: none
948  */
949 void qdf_mem_dp_rx_skb_cnt_dec(void);
950 #else
951 
952 static inline void qdf_mem_skb_inc(qdf_size_t size)
953 {
954 }
955 
956 static inline void qdf_mem_skb_dec(qdf_size_t size)
957 {
958 }
959 
960 static inline void qdf_mem_skb_total_inc(qdf_size_t size)
961 {
962 }
963 
964 static inline void qdf_mem_skb_total_dec(qdf_size_t size)
965 {
966 }
967 
968 static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
969 {
970 }
971 
972 static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
973 {
974 }
975 
976 static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
977 {
978 }
979 
980 static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
981 {
982 }
983 
984 static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
985 {
986 }
987 
988 static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
989 {
990 }
991 
992 static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
993 {
994 }
995 
996 static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
997 {
998 }
999 #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
1000 
1001 /**
1002  * qdf_mem_map_table_alloc() - Allocate shared memory info structure
1003  * @num: number of required storage
1004  *
1005  * Allocate mapping table for DMA memory allocation. This is needed for
1006  * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
1007  *
1008  * Return: shared memory info storage table pointer
1009  */
1010 static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
1011 {
1012 	qdf_mem_info_t *mem_info_arr;
1013 
1014 	mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));
1015 	return mem_info_arr;
1016 }
1017 
1018 #ifdef ENHANCED_OS_ABSTRACTION
1019 /**
1020  * qdf_update_mem_map_table() - Update DMA memory map info
1021  * @osdev: Parent device instance
1022  * @mem_info: Pointer to shared memory information
1023  * @dma_addr: dma address
1024  * @mem_size: memory size allocated
1025  *
1026  * Store DMA shared memory information
1027  *
1028  * Return: none
1029  */
1030 void qdf_update_mem_map_table(qdf_device_t osdev,
1031 			      qdf_mem_info_t *mem_info,
1032 			      qdf_dma_addr_t dma_addr,
1033 			      uint32_t mem_size);
1034 
1035 /**
1036  * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
1037  * @osdev: Parent device instance
1038  * @dma_addr: DMA/IOVA address
1039  *
1040  * Get actual physical address from dma_addr based on SMMU enablement status.
1041  * If SMMU Stage 1 translation is enabled, DMA APIs return an IO virtual
1042  * address (IOVA); otherwise they return a physical address. In the former
1043  * case, get the SMMU physical address mapping from the IOVA.
1044  *
1045  * Return: dmaable physical address
1046  */
1047 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
1048 					  qdf_dma_addr_t dma_addr);
1049 #else
1050 static inline
1051 void qdf_update_mem_map_table(qdf_device_t osdev,
1052 			      qdf_mem_info_t *mem_info,
1053 			      qdf_dma_addr_t dma_addr,
1054 			      uint32_t mem_size)
1055 {
1056 	if (!mem_info) {
1057 		qdf_nofl_err("%s: NULL mem_info", __func__);
1058 		return;
1059 	}
1060 
1061 	__qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
1062 }
1063 
1064 static inline
1065 qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
1066 					  qdf_dma_addr_t dma_addr)
1067 {
1068 	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
1069 }
1070 #endif
1071 
1072 /**
1073  * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
1074  * @osdev: parent device instance
1075  *
1076  * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
1077  */
1078 static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
1079 {
1080 	return __qdf_mem_smmu_s1_enabled(osdev);
1081 }
1082 
1083 /**
1084  * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
1085  * @dev: device instance
1086  * @sgt: scatter gather table pointer
1087  * @cpu_addr: HLOS virtual address
1088  * @dma_addr: dma address
1089  * @size: allocated memory size
1090  *
1091  * Return: 0 on success, else error value
1092  */
1093 static inline int
1094 qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
1095 			qdf_dma_addr_t dma_addr, size_t size)
1096 {
1097 	return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
1098 }
1099 
1100 /**
1101  * qdf_mem_free_sgtable() - Free a previously allocated sg table
1102  * @sgt: the mapped sg table header
1103  *
1104  * Return: None
1105  */
1106 static inline void
1107 qdf_mem_free_sgtable(struct sg_table *sgt)
1108 {
1109 	__qdf_os_mem_free_sgtable(sgt);
1110 }
1111 
1112 /**
1113  * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
1114  * @sgt: scatter gather table pointer
1115  *
1116  * Return: None
1117  */
1118 static inline void
1119 qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
1120 {
1121 	__qdf_dma_get_sgtable_dma_addr(sgt);
1122 }
1123 
1124 /**
1125  * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
1126  * @osdev: Parent device instance
1127  * @mem_info: Pointer to allocated memory information
1128  *
1129  * Get dma address based on SMMU enablement status. If SMMU Stage 1
1130  * translation is enabled, DMA APIs return an IO virtual address; otherwise
1131  * they return a physical address.
1132  *
1133  * Return: dma address
1134  */
1135 static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
1136 						  qdf_mem_info_t *mem_info)
1137 {
1138 	return __qdf_mem_get_dma_addr(osdev, mem_info);
1139 }
1140 
1141 /**
1142  * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
1143  * @osdev: Parent device instance
1144  * @mem_info: Pointer to allocated memory information
1145  *
1146  * Based on smmu stage 1 translation enablement, return corresponding dma
1147  * address storage pointer.
1148  *
1149  * Return: dma address storage pointer
1150  */
1151 static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
1152 						       qdf_mem_info_t *mem_info)
1153 {
1154 	return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
1155 }
1156 
1157 
1158 /**
1159  * qdf_mem_get_dma_size() - Return DMA memory size
1160  * @osdev: parent device instance
1161  * @mem_info: Pointer to allocated memory information
1162  *
1163  * Return: DMA memory size
1164  */
1165 static inline uint32_t
1166 qdf_mem_get_dma_size(qdf_device_t osdev,
1167 		       qdf_mem_info_t *mem_info)
1168 {
1169 	return __qdf_mem_get_dma_size(osdev, mem_info);
1170 }
1171 
1172 /**
1173  * qdf_mem_set_dma_size() - Set DMA memory size
1174  * @osdev: parent device instance
1175  * @mem_info: Pointer to allocated memory information
1176  * @mem_size: memory size allocated
1177  *
1178  * Return: none
1179  */
1180 static inline void
1181 qdf_mem_set_dma_size(qdf_device_t osdev,
1182 		       qdf_mem_info_t *mem_info,
1183 		       uint32_t mem_size)
1184 {
1185 	__qdf_mem_set_dma_size(osdev, mem_info, mem_size);
1186 }
1187 
1188 /**
1189  * qdf_mem_get_dma_pa() - Return DMA physical address
1190  * @osdev: parent device instance
1191  * @mem_info: Pointer to allocated memory information
1192  *
1193  * Return: DMA physical address
1194  */
1195 static inline qdf_dma_addr_t
1196 qdf_mem_get_dma_pa(qdf_device_t osdev,
1197 		     qdf_mem_info_t *mem_info)
1198 {
1199 	return __qdf_mem_get_dma_pa(osdev, mem_info);
1200 }
1201 
1202 /**
1203  * qdf_mem_set_dma_pa() - Set DMA physical address
1204  * @osdev: parent device instance
1205  * @mem_info: Pointer to allocated memory information
1206  * @dma_pa: DMA physical address
1207  *
1208  * Return: none
1209  */
1210 static inline void
1211 qdf_mem_set_dma_pa(qdf_device_t osdev,
1212 		     qdf_mem_info_t *mem_info,
1213 		     qdf_dma_addr_t dma_pa)
1214 {
1215 	__qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
1216 }
1217 
1218 /**
1219  * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
1220  * @osdev: parent device instance
1221  * @size: size to be allocated
1222  *
1223  * Allocate DMA memory which will be shared with external kernel module. This
1224  * information is needed for SMMU mapping.
1225  *
1226  * Return: Pointer to allocated DMA memory on success, NULL on failure
1227  */
1228 qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);
1229 
1230 #ifdef DP_UMAC_HW_RESET_SUPPORT
1231 /**
1232  * qdf_tx_desc_pool_free_bufs() - Go through elements and call the registered cb
1233  * @ctxt: Context to be passed to the cb
1234  * @pages: Multi page information storage
1235  * @elem_size: Each element size
1236  * @elem_count: Total number of elements in the pool.
1237  * @cacheable: Coherent memory or cacheable memory
1238  * @cb: Callback to free the elements
1239  * @elem_list: elem list for delayed free
1240  *
1241  * Return: 0 on success, or error code
1242  */
1243 int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
1244 			       uint32_t elem_size, uint32_t elem_count,
1245 			       uint8_t cacheable, qdf_mem_release_cb cb,
1246 			       void *elem_list);
1247 #endif
1248 
1249 /**
1250  * qdf_mem_shared_mem_free() - Free shared memory
1251  * @osdev: parent device instance
1252  * @shared_mem: shared memory information storage
1253  *
1254  * Free DMA shared memory resource
1255  *
1256  * Return: None
1257  */
1258 static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
1259 					   qdf_shared_mem_t *shared_mem)
1260 {
1261 	if (!shared_mem) {
1262 		qdf_nofl_err("%s: NULL shared mem struct passed",
1263 			     __func__);
1264 		return;
1265 	}
1266 
1267 	if (shared_mem->vaddr) {
1268 		qdf_mem_free_consistent(osdev, osdev->dev,
1269 					qdf_mem_get_dma_size(osdev,
1270 						&shared_mem->mem_info),
1271 					shared_mem->vaddr,
1272 					qdf_mem_get_dma_addr(osdev,
1273 						&shared_mem->mem_info),
1274 					qdf_get_dma_mem_context(shared_mem,
1275 								memctx));
1276 	}
1277 	qdf_mem_free_sgtable(&shared_mem->sgtable);
1278 	qdf_mem_free(shared_mem);
1279 }
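
/*
 * Typical usage (illustrative sketch; 'mem_size' is hypothetical):
 *
 *	shared_mem = qdf_mem_shared_mem_alloc(osdev, mem_size);
 *	if (!shared_mem)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... share shared_mem->mem_info with the external module ...
 *
 *	qdf_mem_shared_mem_free(osdev, shared_mem);
 */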
1280 
1281 /**
1282  * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
1283  * host driver
1284  *
1285  * Return: Total DMA memory allocated
1286  */
1287 int32_t qdf_dma_mem_stats_read(void);
1288 
1289 /**
1290  * qdf_heap_mem_stats_read() - Return the heap memory allocated
1291  * in host driver
1292  *
1293  * Return: Total heap memory allocated
1294  */
1295 int32_t qdf_heap_mem_stats_read(void);
1296 
1297 /**
1298  * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
1299  * host driver
1300  *
1301  * Return: Total SKB memory allocated
1302  */
1303 int32_t qdf_skb_mem_stats_read(void);
1304 
1305 /**
1306  * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
1307  * in the host driver tracked in both debug and perf builds
1308  *
1309  * Return: Total SKB memory allocated
1310  */
1311 int32_t qdf_skb_total_mem_stats_read(void);
1312 
1313 /**
1314  * qdf_skb_max_mem_stats_read() - Return the max SKB memory
1315  * allocated in host driver. This is the high watermark for the
1316  * total SKB allocated in the host driver
1317  *
1318  * Return: Max SKB memory allocated in the host driver
1319  */
1320 int32_t qdf_skb_max_mem_stats_read(void);
1321 
1322 /**
1323  * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
1324  * which are waiting on Tx completions
1325  *
1326  * Return: Outstanding Tx desc count
1327  */
1328 int32_t qdf_mem_tx_desc_cnt_read(void);
1329 
1330 /**
1331  * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
1332  * descs which are waiting on Tx completions. This is the high
1333  * watermark for the pending desc count
1334  *
1335  * Return: Max outstanding Tx desc count
1336  */
1337 int32_t qdf_mem_tx_desc_max_read(void);
1338 
1339 /**
1340  * qdf_mem_stats_init() - Initialize the qdf memstats fields on
1341  * creating the sysfs node
1342  *
1343  * Return: None
1344  */
1345 void qdf_mem_stats_init(void);
1346 
1347 /**
1348  * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
1349  * allocated for Tx data path
1350  *
1351  * Return: Tx SKB memory allocated
1352  */
1353 int32_t qdf_dp_tx_skb_mem_stats_read(void);
1354 
1355 /**
1356  * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
1357  * allocated for Rx data path
1358  *
1359  * Return: Rx SKB memory allocated
1360  */
1361 int32_t qdf_dp_rx_skb_mem_stats_read(void);
1362 
1363 /**
1364  * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
1365  * watermark for the SKB memory allocated for Tx data path
1366  *
1367  * Return: Max Tx SKB memory allocated
1368  */
1369 int32_t qdf_dp_tx_skb_max_mem_stats_read(void);
1370 
1371 /**
1372  * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
1373  * watermark for the SKB memory allocated for Rx data path
1374  *
1375  * Return: Max Rx SKB memory allocated
1376  */
1377 int32_t qdf_dp_rx_skb_max_mem_stats_read(void);
1378 
1379 /**
1380  * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
1381  * allocated in the Tx data path by the host driver or
1382  * buffers coming from the n/w stack
1383  *
1384  * Return: Number of DP Tx buffers allocated
1385  */
1386 int32_t qdf_mem_dp_tx_skb_cnt_read(void);
1387 
1388 /**
1389  * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
1390  * buffers allocated in the Tx data path
1391  *
1392  * Return: Max number of DP Tx buffers allocated
1393  */
1394 int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);
1395 
1396 /**
1397  * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
1398  * allocated in the Rx data path
1399  *
1400  * Return: Number of DP Rx buffers allocated
1401  */
1402 int32_t qdf_mem_dp_rx_skb_cnt_read(void);
1403 
1404 /**
1405  * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
1406  * buffers allocated in the Rx data path
1407  *
1408  * Return: Max number of DP Rx buffers allocated
1409  */
1410 int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);
1411 
1412 /**
1413  * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
1414  * count and the high watermark for pending tx desc count
1415  *
1416  * @pending_tx_descs: outstanding Tx desc count
1417  * @tx_descs_max: high watermark for outstanding Tx desc count
1418  *
1419  * Return: None
1420  */
1421 void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
1422 				int32_t tx_descs_max);
1423 
1424 /**
1425  * qdf_mem_vfree() - Free the virtual memory pointed to by ptr
1426  * @ptr: Pointer to the starting address of the memory to
1427  * be freed.
1428  *
1429  * Return: None
1430  */
1431 #define qdf_mem_vfree(ptr)   __qdf_mem_vfree(ptr)
1432 
1433 /**
1434  * qdf_mem_valloc() - Allocate virtual memory for the given
1435  * size
1436  * @size: Number of bytes of memory to be allocated
1437  *
1438  * Return: Pointer to the starting address of the allocated virtual memory
1439  */
1440 #define qdf_mem_valloc(size) __qdf_mem_valloc(size, __func__, __LINE__)
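
/*
 * Illustrative sketch ('tbl' and 'num_entries' are hypothetical);
 * valloc/vfree suit larger allocations that do not require physically
 * contiguous memory:
 *
 *	tbl = qdf_mem_valloc(num_entries * sizeof(*tbl));
 *	if (!tbl)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_vfree(tbl);
 */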
1441 
1442 #ifdef ENABLE_VALLOC_REPLACE_MALLOC
1443 /**
1444  * qdf_mem_common_alloc() - Common function to allocate memory for the
1445  * given size, allocation method decided by ENABLE_VALLOC_REPLACE_MALLOC
1446  * @size: Number of bytes of memory to be allocated
1447  *
1448  * Return: Pointer to the starting address of the allocated memory
1449  */
1450 #define qdf_mem_common_alloc(size) qdf_mem_valloc(size)
1451 
1452 /**
1453  * qdf_mem_common_free() - Common function to free the memory pointed
1454  * to by ptr, memory free method decided by ENABLE_VALLOC_REPLACE_MALLOC
1455  * @ptr: Pointer to the starting address of the memory to
1456  * be freed.
1457  *
1458  * Return: None
1459  */
1460 #define qdf_mem_common_free(ptr) qdf_mem_vfree(ptr)
1461 #else
1462 #define qdf_mem_common_alloc(size) qdf_mem_malloc(size)
1463 #define qdf_mem_common_free(ptr) qdf_mem_free(ptr)
1464 #endif
1465 
1466 /**
1467  * qdf_ioremap() - map bus memory into cpu space
1468  * @HOST_CE_ADDRESS: bus address of the memory
1469  * @HOST_CE_SIZE: memory size to map
1470  */
1471 #define qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
1472 			__qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
1473 
1474 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
1475 /*
1476  * typedef qdf_iommu_domain_t: Platform independent iommu domain
1477  * abstraction
1478  */
1479 typedef __qdf_iommu_domain_t qdf_iommu_domain_t;
1480 
1481 /**
1482  * qdf_iommu_domain_get_attr() - API to get iommu domain attributes
1483  * @domain: iommu domain
1484  * @attr: iommu attribute
1485  * @data: data pointer
1486  *
1487  * Return: 0 on success, else errno
1488  */
1489 int
1490 qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
1491 			  enum qdf_iommu_attr attr, void *data);
1492 #endif
1493 
1494 #define DEFAULT_DEBUG_DOMAIN_INIT 0
1495 #ifdef QCA_DMA_PADDR_CHECK
1496 /**
1497  * qdf_dma_invalid_buf_list_init() - Initialize dma invalid buffer list
1498  *
1499  * Return: none
1500  */
1501 void qdf_dma_invalid_buf_list_init(void);
1502 
1503 /**
1504  * qdf_dma_invalid_buf_list_deinit() - Deinitialize dma invalid buffer list
1505  *
1506  * Return: none
1507  */
1508 void qdf_dma_invalid_buf_list_deinit(void);
1509 
1510 /**
1511  * qdf_dma_invalid_buf_free() - Free dma invalid buffer
1512  * @dev: Pointer to device handle
1513  * @domain: Debug domain
1514  *
1515  * Return: none
1516  */
1517 void qdf_dma_invalid_buf_free(void *dev, uint8_t domain);
1518 #else
1519 static inline void
1520 qdf_dma_invalid_buf_list_init(void)
1521 {
1522 }
1523 
1524 static inline void
1525 qdf_dma_invalid_buf_list_deinit(void)
1526 {
1527 }
1528 
1529 static inline void
1530 qdf_dma_invalid_buf_free(void *dev, uint8_t domain)
1531 {
1532 }
1533 #endif /* QCA_DMA_PADDR_CHECK */
1534 #endif /* __QDF_MEMORY_H */
1535