xref: /wlan-driver/qca-wifi-host-cmn/qdf/linux/src/qdf_mem.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name /**
21*5113495bSYour Name  * DOC: qdf_mem
22*5113495bSYour Name  * This file provides OS dependent memory management APIs
23*5113495bSYour Name  */
24*5113495bSYour Name 
25*5113495bSYour Name #include "qdf_debugfs.h"
26*5113495bSYour Name #include "qdf_mem.h"
27*5113495bSYour Name #include "qdf_nbuf.h"
28*5113495bSYour Name #include "qdf_lock.h"
29*5113495bSYour Name #include "qdf_mc_timer.h"
30*5113495bSYour Name #include "qdf_module.h"
31*5113495bSYour Name #include <qdf_trace.h>
32*5113495bSYour Name #include "qdf_str.h"
33*5113495bSYour Name #include "qdf_talloc.h"
34*5113495bSYour Name #include <linux/debugfs.h>
35*5113495bSYour Name #include <linux/seq_file.h>
36*5113495bSYour Name #include <linux/string.h>
37*5113495bSYour Name #include <qdf_list.h>
38*5113495bSYour Name 
39*5113495bSYour Name #ifdef CNSS_MEM_PRE_ALLOC
40*5113495bSYour Name #ifdef CONFIG_CNSS_OUT_OF_TREE
41*5113495bSYour Name #include "cnss_prealloc.h"
42*5113495bSYour Name #else
43*5113495bSYour Name #include <net/cnss_prealloc.h>
44*5113495bSYour Name #endif
45*5113495bSYour Name #endif
46*5113495bSYour Name 
#if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
/* module parameter: allows memory debug tracking to be disabled at load time */
static bool mem_debug_disabled;
qdf_declare_param(mem_debug_disabled, bool);
#endif

#ifdef MEMORY_DEBUG
/* presumably a snapshot of mem_debug_disabled captured during init so the
 * debug paths see a stable value - TODO confirm where this is assigned
 */
static bool is_initial_mem_debug_disabled;
#endif
55*5113495bSYour Name 
56*5113495bSYour Name /* Preprocessor Definitions and Constants */
57*5113495bSYour Name #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
58*5113495bSYour Name #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
59*5113495bSYour Name #define QDF_DEBUG_STRING_SIZE 512
60*5113495bSYour Name 
/**
 * struct __qdf_mem_stat - qdf memory statistics
 * @kmalloc: total kmalloc allocations
 * @dma: total dma allocations
 * @skb: total skb allocations
 * @skb_total: total skb allocations in host driver
 * @dp_tx_skb: total Tx skb allocations in datapath
 * @dp_rx_skb: total Rx skb allocations in datapath
 * @skb_mem_max: high watermark for skb allocations
 * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
 * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
 * @dp_tx_skb_count: DP Tx buffer count
 * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
 * @dp_rx_skb_count: DP Rx buffer count
 * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
 * @tx_descs_outstanding: Current pending Tx descs count
 * @tx_descs_max: High watermark for pending Tx descs count
 */
static struct __qdf_mem_stat {
	qdf_atomic_t kmalloc;
	qdf_atomic_t dma;
	qdf_atomic_t skb;
	qdf_atomic_t skb_total;
	qdf_atomic_t dp_tx_skb;
	qdf_atomic_t dp_rx_skb;
	/* note: the *_max watermark fields below are plain int32_t,
	 * not qdf_atomic_t like the live counters
	 */
	int32_t skb_mem_max;
	int32_t dp_tx_skb_mem_max;
	int32_t dp_rx_skb_mem_max;
	qdf_atomic_t dp_tx_skb_count;
	int32_t dp_tx_skb_count_max;
	qdf_atomic_t dp_rx_skb_count;
	int32_t dp_rx_skb_count_max;
	qdf_atomic_t tx_descs_outstanding;
	int32_t tx_descs_max;
} qdf_mem_stat;
96*5113495bSYour Name 
97*5113495bSYour Name #ifdef MEMORY_DEBUG
98*5113495bSYour Name #include "qdf_debug_domain.h"
99*5113495bSYour Name 
/**
 * enum list_type - selects which debug tracking list an operation targets
 * @LIST_TYPE_MEM: heap (kmalloc-style) allocation tracking list
 * @LIST_TYPE_DMA: DMA allocation tracking list
 * @LIST_TYPE_NBUF: network buffer (nbuf/skb) tracking list
 * @LIST_TYPE_MAX: number of valid list types (sentinel)
 */
enum list_type {
	LIST_TYPE_MEM = 0,
	LIST_TYPE_DMA = 1,
	LIST_TYPE_NBUF = 2,
	LIST_TYPE_MAX,
};
106*5113495bSYour Name 
/**
 * struct major_alloc_priv - private data registered to debugfs entry
 *                           created to list the major allocations
 * @type:            type of the list to be parsed
 * @threshold:       configured by user by overwriting the respective debugfs
 *                   sys entry. This is to list the functions which requested
 *                   memory/dma allocations more than threshold number of times.
 */
struct major_alloc_priv {
	enum list_type type;
	uint32_t threshold;
};
119*5113495bSYour Name 
/* per-debug-domain tracking lists for heap allocations, and the spinlock
 * protecting them (see qdf_mem_domain_print)
 */
static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_list_lock;

/* per-debug-domain tracking lists for DMA allocations, with their own lock */
static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
static qdf_spinlock_t qdf_mem_dma_list_lock;
125*5113495bSYour Name 
/* return the heap allocation tracking list for @domain */
static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
{
	return &qdf_mem_domains[domain];
}
130*5113495bSYour Name 
/* return the DMA allocation tracking list for @domain */
static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
{
	return &qdf_mem_dma_domains[domain];
}
135*5113495bSYour Name 
/**
 * struct qdf_mem_header - memory object to debug
 * @node: node to the list
 * @domain: the active memory domain at time of allocation
 * @freed: flag set during free, used to detect double frees
 *	Use uint8_t so we can detect corruption
 * @func: name of the function the allocation was made from
 * @line: line number of the file the allocation was made from
 * @size: size of the allocation in bytes
 * @caller: Caller of the function for which memory is allocated
 * @header: a known value, used to detect out-of-bounds access
 * @time: timestamp at which allocation was made
 */
struct qdf_mem_header {
	qdf_list_node_t node;
	enum qdf_debug_domain domain;
	uint8_t freed;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint64_t header;
	uint64_t time;
};
160*5113495bSYour Name 
/* align the qdf_mem_header to 8 bytes */
#define QDF_DMA_MEM_HEADER_ALIGN 8

/* canary values written before (header) and after (trailer) each tracked
 * allocation; 0x61..0x68 is ASCII "abcdefgh", so corruption is easy to
 * spot in a hex dump
 */
static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
166*5113495bSYour Name 
qdf_mem_get_header(void * ptr)167*5113495bSYour Name static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
168*5113495bSYour Name {
169*5113495bSYour Name 	return (struct qdf_mem_header *)ptr - 1;
170*5113495bSYour Name }
171*5113495bSYour Name 
172*5113495bSYour Name /* make sure the header pointer is 8bytes aligned */
qdf_mem_dma_get_header(void * ptr,qdf_size_t size)173*5113495bSYour Name static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
174*5113495bSYour Name 							    qdf_size_t size)
175*5113495bSYour Name {
176*5113495bSYour Name 	return (struct qdf_mem_header *)
177*5113495bSYour Name 				qdf_roundup((size_t)((uint8_t *)ptr + size),
178*5113495bSYour Name 					    QDF_DMA_MEM_HEADER_ALIGN);
179*5113495bSYour Name }
180*5113495bSYour Name 
qdf_mem_get_trailer(struct qdf_mem_header * header)181*5113495bSYour Name static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
182*5113495bSYour Name {
183*5113495bSYour Name 	return (uint64_t *)((void *)(header + 1) + header->size);
184*5113495bSYour Name }
185*5113495bSYour Name 
/* return the user data pointer for @header (user data follows the header) */
static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
{
	return (void *)(header + 1);
}
190*5113495bSYour Name 
/* number of bytes needed for the qdf memory debug information:
 * a header before the user data plus a trailer canary after it
 */
#define QDF_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))

/* number of bytes needed for the qdf dma memory debug information:
 * the header is placed after the buffer (see qdf_mem_dma_get_header),
 * so alignment padding is reserved instead of a trailer
 */
#define QDF_DMA_MEM_DEBUG_SIZE \
	(sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
198*5113495bSYour Name 
/* write the trailer canary just past @header's user data region;
 * requires header->size to have been set already (qdf_mem_get_trailer
 * reads it to find the trailer location)
 */
static void qdf_mem_trailer_init(struct qdf_mem_header *header)
{
	QDF_BUG(header);
	if (!header)
		return;
	*qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
}
206*5113495bSYour Name 
/**
 * qdf_mem_header_init() - populate a freshly allocated debug header
 * @header: the header to initialize (ignored, with a QDF_BUG, when NULL)
 * @size: size of the user allocation in bytes
 * @func: name of the allocating function
 * @line: source line of the allocation
 * @caller: return address of the allocation call site
 */
static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
				const char *func, uint32_t line, void *caller)
{
	QDF_BUG(header);
	if (!header)
		return;

	/* record the allocation site */
	qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
	header->line = line;
	header->size = size;
	header->caller = caller;

	/* record allocation context and stamp the canary */
	header->domain = qdf_debug_domain_get();
	header->freed = false;
	header->header = WLAN_MEM_HEADER;
	header->time = qdf_get_log_timestamp();
}
225*5113495bSYour Name 
/**
 * enum qdf_mem_validation_bitmap - errors detectable on a debug header
 * @QDF_MEM_BAD_HEADER: leading canary value was overwritten
 * @QDF_MEM_BAD_TRAILER: trailing canary value was overwritten
 * @QDF_MEM_BAD_SIZE: recorded size exceeds QDF_MEM_MAX_MALLOC
 * @QDF_MEM_DOUBLE_FREE: the freed flag was already set (double free)
 * @QDF_MEM_BAD_FREED: the freed flag holds a value other than 0 or 1
 * @QDF_MEM_BAD_NODE: header's list node is not on any tracking list
 * @QDF_MEM_BAD_DOMAIN: recorded debug domain is out of range
 * @QDF_MEM_WRONG_DOMAIN: recorded domain differs from the current domain
 */
enum qdf_mem_validation_bitmap {
	QDF_MEM_BAD_HEADER = 1 << 0,
	QDF_MEM_BAD_TRAILER = 1 << 1,
	QDF_MEM_BAD_SIZE = 1 << 2,
	QDF_MEM_DOUBLE_FREE = 1 << 3,
	QDF_MEM_BAD_FREED = 1 << 4,
	QDF_MEM_BAD_NODE = 1 << 5,
	QDF_MEM_BAD_DOMAIN = 1 << 6,
	QDF_MEM_WRONG_DOMAIN = 1 << 7,
};
236*5113495bSYour Name 
237*5113495bSYour Name static enum qdf_mem_validation_bitmap
qdf_mem_trailer_validate(struct qdf_mem_header * header)238*5113495bSYour Name qdf_mem_trailer_validate(struct qdf_mem_header *header)
239*5113495bSYour Name {
240*5113495bSYour Name 	enum qdf_mem_validation_bitmap error_bitmap = 0;
241*5113495bSYour Name 
242*5113495bSYour Name 	if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
243*5113495bSYour Name 		error_bitmap |= QDF_MEM_BAD_TRAILER;
244*5113495bSYour Name 	return error_bitmap;
245*5113495bSYour Name }
246*5113495bSYour Name 
/**
 * qdf_mem_header_validate() - check a memory debug header for corruption
 * @header: the header to validate
 * @domain: the debug domain the caller expects the allocation to belong to
 *
 * Return: bitmap of qdf_mem_validation_bitmap errors; 0 if the header is valid
 */
static enum qdf_mem_validation_bitmap
qdf_mem_header_validate(struct qdf_mem_header *header,
			enum qdf_debug_domain domain)
{
	enum qdf_mem_validation_bitmap error_bitmap = 0;

	/* leading canary intact? */
	if (header->header != WLAN_MEM_HEADER)
		error_bitmap |= QDF_MEM_BAD_HEADER;

	if (header->size > QDF_MEM_MAX_MALLOC)
		error_bitmap |= QDF_MEM_BAD_SIZE;

	/* freed is a uint8_t: exactly 1 (true) means a double free; any
	 * other non-zero value means the flag itself was corrupted
	 */
	if (header->freed == true)
		error_bitmap |= QDF_MEM_DOUBLE_FREE;
	else if (header->freed)
		error_bitmap |= QDF_MEM_BAD_FREED;

	if (!qdf_list_node_in_any_list(&header->node))
		error_bitmap |= QDF_MEM_BAD_NODE;

	/* an out-of-range domain is corruption; a valid-but-different
	 * domain is a cross-domain usage error
	 */
	if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
	    header->domain >= QDF_DEBUG_DOMAIN_COUNT)
		error_bitmap |= QDF_MEM_BAD_DOMAIN;
	else if (header->domain != domain)
		error_bitmap |= QDF_MEM_WRONG_DOMAIN;

	return error_bitmap;
}
275*5113495bSYour Name 
/**
 * qdf_mem_header_assert_valid() - log any header errors and panic
 * @header: the header that was validated
 * @current_domain: the active debug domain at the time of the check
 * @error_bitmap: errors found by qdf_mem_header_validate()/
 *                qdf_mem_trailer_validate(); no-op when 0
 * @func: name of the function that triggered the validation
 * @line: line number that triggered the validation
 *
 * Logs one message per error bit, then panics via QDF_MEMDEBUG_PANIC.
 * Does not return when @error_bitmap is non-zero.
 */
static void
qdf_mem_header_assert_valid(struct qdf_mem_header *header,
			    enum qdf_debug_domain current_domain,
			    enum qdf_mem_validation_bitmap error_bitmap,
			    const char *func,
			    uint32_t line)
{
	if (!error_bitmap)
		return;

	if (error_bitmap & QDF_MEM_BAD_HEADER)
		qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
			header->header, WLAN_MEM_HEADER);

	if (error_bitmap & QDF_MEM_BAD_SIZE)
		qdf_err("Corrupted memory size %u (expected < %d)",
			header->size, QDF_MEM_MAX_MALLOC);

	if (error_bitmap & QDF_MEM_BAD_TRAILER)
		qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
			*qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);

	if (error_bitmap & QDF_MEM_DOUBLE_FREE)
		qdf_err("Memory has previously been freed");

	if (error_bitmap & QDF_MEM_BAD_FREED)
		qdf_err("Corrupted memory freed flag 0x%x", header->freed);

	if (error_bitmap & QDF_MEM_BAD_NODE)
		qdf_err("Corrupted memory header node or double free");

	if (error_bitmap & QDF_MEM_BAD_DOMAIN)
		qdf_err("Corrupted memory domain 0x%x", header->domain);

	if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
		qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
			qdf_debug_domain_name(header->domain), header->domain,
			qdf_debug_domain_name(current_domain), current_domain);

	QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
}
317*5113495bSYour Name 
/**
 * struct __qdf_mem_info - memory statistics
 * @func: the function which allocated memory
 * @line: the line at which allocation happened
 * @size: the size of allocation
 * @caller: Address of the caller function
 * @count: how many allocations of same type (0 marks an unused table slot)
 * @time: timestamp of the first allocation folded into this entry
 */
struct __qdf_mem_info {
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	uint32_t size;
	void *caller;
	uint32_t count;
	uint64_t time;
};
335*5113495bSYour Name 
/*
 * The table depth bounds the scope of allocation de-duplication:
 * only entries resident in the same table pass can be merged.
 * A deeper table takes more time per pass, so choose a balanced value.
 */
340*5113495bSYour Name #define QDF_MEM_STAT_TABLE_SIZE 8
341*5113495bSYour Name 
/**
 * qdf_mem_debug_print_header() - memory debug header print logic
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @threshold: the threshold value set by user to list top allocations
 *
 * Prints the column banner for the allocation tables; when @threshold is
 * non-zero, a leading line documents the filter being applied.
 *
 * Return: None
 */
static void qdf_mem_debug_print_header(qdf_abstract_print print,
				       void *print_priv,
				       uint32_t threshold)
{
	if (threshold)
		print(print_priv, "APIs requested allocations >= %u no of time",
		      threshold);
	print(print_priv,
	      "--------------------------------------------------------------");
	print(print_priv,
	      " count    size     total    filename     caller    timestamp");
	print(print_priv,
	      "--------------------------------------------------------------");
}
364*5113495bSYour Name 
365*5113495bSYour Name /**
366*5113495bSYour Name  * qdf_mem_meta_table_insert() - insert memory metadata into the given table
367*5113495bSYour Name  * @table: the memory metadata table to insert into
368*5113495bSYour Name  * @meta: the memory metadata to insert
369*5113495bSYour Name  *
370*5113495bSYour Name  * Return: true if the table is full after inserting, false otherwise
371*5113495bSYour Name  */
qdf_mem_meta_table_insert(struct __qdf_mem_info * table,struct qdf_mem_header * meta)372*5113495bSYour Name static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
373*5113495bSYour Name 				      struct qdf_mem_header *meta)
374*5113495bSYour Name {
375*5113495bSYour Name 	int i;
376*5113495bSYour Name 
377*5113495bSYour Name 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
378*5113495bSYour Name 		if (!table[i].count) {
379*5113495bSYour Name 			qdf_str_lcopy(table[i].func, meta->func,
380*5113495bSYour Name 				      QDF_MEM_FUNC_NAME_SIZE);
381*5113495bSYour Name 			table[i].line = meta->line;
382*5113495bSYour Name 			table[i].size = meta->size;
383*5113495bSYour Name 			table[i].count = 1;
384*5113495bSYour Name 			table[i].caller = meta->caller;
385*5113495bSYour Name 			table[i].time = meta->time;
386*5113495bSYour Name 			break;
387*5113495bSYour Name 		}
388*5113495bSYour Name 
389*5113495bSYour Name 		if (qdf_str_eq(table[i].func, meta->func) &&
390*5113495bSYour Name 		    table[i].line == meta->line &&
391*5113495bSYour Name 		    table[i].size == meta->size &&
392*5113495bSYour Name 		    table[i].caller == meta->caller) {
393*5113495bSYour Name 			table[i].count++;
394*5113495bSYour Name 			break;
395*5113495bSYour Name 		}
396*5113495bSYour Name 	}
397*5113495bSYour Name 
398*5113495bSYour Name 	/* return true if the table is now full */
399*5113495bSYour Name 	return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
400*5113495bSYour Name }
401*5113495bSYour Name 
/**
 * qdf_mem_domain_print() - output agnostic memory domain print logic
 * @domain: the memory domain to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @threshold: the threshold value set by user to list top allocations
 * @mem_print: pointer to function which prints the memory allocation data
 *
 * Walks @domain, de-duplicating entries into a bounded table and flushing
 * the table through @mem_print each time it fills up.
 *
 * NOTE(review): this always takes qdf_mem_list_lock, even though this file
 * also defines qdf_mem_dma_list_lock for the DMA domain lists - verify that
 * callers never pass a list protected by the DMA lock.
 *
 * Return: None
 */
static void qdf_mem_domain_print(qdf_list_t *domain,
				 qdf_abstract_print print,
				 void *print_priv,
				 uint32_t threshold,
				 void (*mem_print)(struct __qdf_mem_info *,
						   qdf_abstract_print,
						   void *, uint32_t))
{
	QDF_STATUS status;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	qdf_list_node_t *node;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_debug_print_header(print, print_priv, threshold);

	/* hold lock while inserting to avoid use-after free of the metadata */
	qdf_spin_lock(&qdf_mem_list_lock);
	status = qdf_list_peek_front(domain, &node);
	while (QDF_IS_STATUS_SUCCESS(status)) {
		struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
		bool is_full = qdf_mem_meta_table_insert(table, meta);

		/* drop the lock around the (potentially slow) print callback */
		qdf_spin_unlock(&qdf_mem_list_lock);

		if (is_full) {
			(*mem_print)(table, print, print_priv, threshold);
			qdf_mem_zero(table, sizeof(table));
		}

		qdf_spin_lock(&qdf_mem_list_lock);
		status = qdf_list_peek_next(domain, node, &node);
	}
	qdf_spin_unlock(&qdf_mem_list_lock);

	/* flush whatever is left in the partially-filled table */
	(*mem_print)(table, print, print_priv, threshold);
}
448*5113495bSYour Name 
/**
 * qdf_mem_meta_table_print() - memory metadata table print logic
 * @table: the memory metadata table to print
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @threshold: the threshold value set by user to list top allocations
 *
 * Prints one detail line per table entry, and additionally accumulates a
 * single "WLAN_BUG_RCA" summary line that is emitted at the end.
 *
 * Return: None
 */
static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
				     qdf_abstract_print print,
				     void *print_priv,
				     uint32_t threshold)
{
	int i;
	char debug_str[QDF_DEBUG_STRING_SIZE];
	size_t len = 0;
	char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";

	len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
			     "%s", debug_prefix);

	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
		/* entries fill front-to-back; first empty slot ends the table */
		if (!table[i].count)
			break;

		print(print_priv,
		      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
		      table[i].count,
		      table[i].size,
		      table[i].count * table[i].size,
		      table[i].func,
		      table[i].line, table[i].caller,
		      table[i].time);
		/* qdf_scnprintf bounds the write, so overflowing the summary
		 * buffer only truncates it - presumably by design; confirm
		 * qdf_scnprintf never returns more than the space given
		 */
		len += qdf_scnprintf(debug_str + len,
				     sizeof(debug_str) - len,
				     " @ %s:%u %pS",
				     table[i].func,
				     table[i].line,
				     table[i].caller);
	}
	print(print_priv, "%s", debug_str);
}
492*5113495bSYour Name 
/* qdf_abstract_print adapter that routes messages to the QDF error trace;
 * @priv is unused; always returns 0
 */
static int qdf_err_printer(void *priv, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
	va_end(args);

	return 0;
}
503*5113495bSYour Name 
504*5113495bSYour Name #endif /* MEMORY_DEBUG */
505*5113495bSYour Name 
506*5113495bSYour Name bool prealloc_disabled = 1;
507*5113495bSYour Name qdf_declare_param(prealloc_disabled, bool);
508*5113495bSYour Name qdf_export_symbol(prealloc_disabled);
509*5113495bSYour Name 
qdf_mem_malloc_flags(void)510*5113495bSYour Name int qdf_mem_malloc_flags(void)
511*5113495bSYour Name {
512*5113495bSYour Name 	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
513*5113495bSYour Name 		return GFP_ATOMIC;
514*5113495bSYour Name 
515*5113495bSYour Name 	return GFP_KERNEL;
516*5113495bSYour Name }
517*5113495bSYour Name 
518*5113495bSYour Name qdf_export_symbol(qdf_mem_malloc_flags);
519*5113495bSYour Name 
/* report the current value of the prealloc_disabled module parameter */
bool qdf_prealloc_disabled_config_get(void)
{
	return prealloc_disabled;
}

qdf_export_symbol(qdf_prealloc_disabled_config_get);
526*5113495bSYour Name 
#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * qdf_prealloc_disabled_config_set() - parse an INI string into the
 * prealloc_disabled module parameter
 * @str_value: boolean-valued string from the INI configuration
 *
 * Return: QDF_STATUS of the underlying bool parse
 */
QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
{
	return qdf_bool_parse(str_value, &prealloc_disabled);
}
#endif
536*5113495bSYour Name 
#if defined WLAN_DEBUGFS

/* debugfs root directory under which the qdf_mem entries are created */
static struct dentry *qdf_mem_debugfs_root;
541*5113495bSYour Name 
542*5113495bSYour Name #ifdef MEMORY_DEBUG
/**
 * seq_printf_printer() - qdf_abstract_print adapter writing to a seq_file
 * @priv: the struct seq_file to print into
 * @fmt: printf-style format string
 *
 * Appends a newline after each message.
 *
 * Return: 0 (always)
 */
static int seq_printf_printer(void *priv, const char *fmt, ...)
{
	struct seq_file *file = priv;
	va_list args;

	va_start(args, fmt);
	seq_vprintf(file, fmt, args);
	seq_puts(file, "\n");
	va_end(args);

	return 0;
}
555*5113495bSYour Name 
556*5113495bSYour Name /**
557*5113495bSYour Name  * qdf_print_major_alloc() - memory metadata table print logic
558*5113495bSYour Name  * @table: the memory metadata table to print
559*5113495bSYour Name  * @print: the print adapter function
560*5113495bSYour Name  * @print_priv: the private data to be consumed by @print
561*5113495bSYour Name  * @threshold: the threshold value set by uset to list top allocations
562*5113495bSYour Name  *
563*5113495bSYour Name  * Return: None
564*5113495bSYour Name  */
qdf_print_major_alloc(struct __qdf_mem_info * table,qdf_abstract_print print,void * print_priv,uint32_t threshold)565*5113495bSYour Name static void qdf_print_major_alloc(struct __qdf_mem_info *table,
566*5113495bSYour Name 				  qdf_abstract_print print,
567*5113495bSYour Name 				  void *print_priv,
568*5113495bSYour Name 				  uint32_t threshold)
569*5113495bSYour Name {
570*5113495bSYour Name 	int i;
571*5113495bSYour Name 
572*5113495bSYour Name 	for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
573*5113495bSYour Name 		if (!table[i].count)
574*5113495bSYour Name 			break;
575*5113495bSYour Name 		if (table[i].count >= threshold)
576*5113495bSYour Name 			print(print_priv,
577*5113495bSYour Name 			      "%6u x %5u = %7uB @ %s:%u   %pS %llu",
578*5113495bSYour Name 			      table[i].count,
579*5113495bSYour Name 			      table[i].size,
580*5113495bSYour Name 			      table[i].count * table[i].size,
581*5113495bSYour Name 			      table[i].func,
582*5113495bSYour Name 			      table[i].line, table[i].caller,
583*5113495bSYour Name 			      table[i].time);
584*5113495bSYour Name 	}
585*5113495bSYour Name }
586*5113495bSYour Name 
587*5113495bSYour Name /**
588*5113495bSYour Name  * qdf_mem_seq_start() - sequential callback to start
589*5113495bSYour Name  * @seq: seq_file handle
590*5113495bSYour Name  * @pos: The start position of the sequence
591*5113495bSYour Name  *
592*5113495bSYour Name  * Return: iterator pointer, or NULL if iteration is complete
593*5113495bSYour Name  */
qdf_mem_seq_start(struct seq_file * seq,loff_t * pos)594*5113495bSYour Name static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
595*5113495bSYour Name {
596*5113495bSYour Name 	enum qdf_debug_domain domain = *pos;
597*5113495bSYour Name 
598*5113495bSYour Name 	if (!qdf_debug_domain_valid(domain))
599*5113495bSYour Name 		return NULL;
600*5113495bSYour Name 
601*5113495bSYour Name 	/* just use the current position as our iterator */
602*5113495bSYour Name 	return pos;
603*5113495bSYour Name }
604*5113495bSYour Name 
605*5113495bSYour Name /**
606*5113495bSYour Name  * qdf_mem_seq_next() - next sequential callback
607*5113495bSYour Name  * @seq: seq_file handle
608*5113495bSYour Name  * @v: the current iterator
609*5113495bSYour Name  * @pos: the current position
610*5113495bSYour Name  *
611*5113495bSYour Name  * Get the next node and release previous node.
612*5113495bSYour Name  *
613*5113495bSYour Name  * Return: iterator pointer, or NULL if iteration is complete
614*5113495bSYour Name  */
qdf_mem_seq_next(struct seq_file * seq,void * v,loff_t * pos)615*5113495bSYour Name static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
616*5113495bSYour Name {
617*5113495bSYour Name 	++*pos;
618*5113495bSYour Name 
619*5113495bSYour Name 	return qdf_mem_seq_start(seq, pos);
620*5113495bSYour Name }
621*5113495bSYour Name 
/**
 * qdf_mem_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * The iterator holds no per-iteration resources, so there is nothing
 * to release here.
 *
 * Return: None
 */
static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
630*5113495bSYour Name 
631*5113495bSYour Name /**
632*5113495bSYour Name  * qdf_mem_seq_show() - print sequential callback
633*5113495bSYour Name  * @seq: seq_file handle
634*5113495bSYour Name  * @v: current iterator
635*5113495bSYour Name  *
636*5113495bSYour Name  * Return: 0 - success
637*5113495bSYour Name  */
qdf_mem_seq_show(struct seq_file * seq,void * v)638*5113495bSYour Name static int qdf_mem_seq_show(struct seq_file *seq, void *v)
639*5113495bSYour Name {
640*5113495bSYour Name 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
641*5113495bSYour Name 
642*5113495bSYour Name 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
643*5113495bSYour Name 		   qdf_debug_domain_name(domain_id), domain_id);
644*5113495bSYour Name 	qdf_mem_domain_print(qdf_mem_list_get(domain_id),
645*5113495bSYour Name 			     seq_printf_printer,
646*5113495bSYour Name 			     seq,
647*5113495bSYour Name 			     0,
648*5113495bSYour Name 			     qdf_mem_meta_table_print);
649*5113495bSYour Name 
650*5113495bSYour Name 	return 0;
651*5113495bSYour Name }
652*5113495bSYour Name 
/* sequential file operation table backing the "list" debugfs node:
 * iterates the memory debug domains and prints each domain's
 * active allocations
 */
static const struct seq_operations qdf_mem_seq_ops = {
	.start = qdf_mem_seq_start,
	.next  = qdf_mem_seq_next,
	.stop  = qdf_mem_seq_stop,
	.show  = qdf_mem_seq_show,
};
660*5113495bSYour Name 
661*5113495bSYour Name 
/**
 * qdf_mem_debugfs_open() - debugfs open callback for the "list" node
 * @inode: inode of the opened debugfs file
 * @file: file handle being opened
 *
 * Return: 0 on success, negative errno from seq_open() on failure
 */
static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &qdf_mem_seq_ops);
}
666*5113495bSYour Name 
667*5113495bSYour Name /**
668*5113495bSYour Name  * qdf_major_alloc_show() - print sequential callback
669*5113495bSYour Name  * @seq: seq_file handle
670*5113495bSYour Name  * @v: current iterator
671*5113495bSYour Name  *
672*5113495bSYour Name  * Return: 0 - success
673*5113495bSYour Name  */
qdf_major_alloc_show(struct seq_file * seq,void * v)674*5113495bSYour Name static int qdf_major_alloc_show(struct seq_file *seq, void *v)
675*5113495bSYour Name {
676*5113495bSYour Name 	enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
677*5113495bSYour Name 	struct major_alloc_priv *priv;
678*5113495bSYour Name 	qdf_list_t *list;
679*5113495bSYour Name 
680*5113495bSYour Name 	priv = (struct major_alloc_priv *)seq->private;
681*5113495bSYour Name 	seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
682*5113495bSYour Name 		   qdf_debug_domain_name(domain_id), domain_id);
683*5113495bSYour Name 
684*5113495bSYour Name 	switch (priv->type) {
685*5113495bSYour Name 	case LIST_TYPE_MEM:
686*5113495bSYour Name 		list = qdf_mem_list_get(domain_id);
687*5113495bSYour Name 		break;
688*5113495bSYour Name 	case LIST_TYPE_DMA:
689*5113495bSYour Name 		list = qdf_mem_dma_list(domain_id);
690*5113495bSYour Name 		break;
691*5113495bSYour Name 	default:
692*5113495bSYour Name 		list = NULL;
693*5113495bSYour Name 		break;
694*5113495bSYour Name 	}
695*5113495bSYour Name 
696*5113495bSYour Name 	if (list)
697*5113495bSYour Name 		qdf_mem_domain_print(list,
698*5113495bSYour Name 				     seq_printf_printer,
699*5113495bSYour Name 				     seq,
700*5113495bSYour Name 				     priv->threshold,
701*5113495bSYour Name 				     qdf_print_major_alloc);
702*5113495bSYour Name 
703*5113495bSYour Name 	return 0;
704*5113495bSYour Name }
705*5113495bSYour Name 
/* sequential file operation table created to track major allocs;
 * shares start/next/stop iteration with qdf_mem_seq_ops and only
 * differs in the show callback
 */
static const struct seq_operations qdf_major_allocs_seq_ops = {
	.start = qdf_mem_seq_start,
	.next = qdf_mem_seq_next,
	.stop = qdf_mem_seq_stop,
	.show = qdf_major_alloc_show,
};
713*5113495bSYour Name 
qdf_major_allocs_open(struct inode * inode,struct file * file)714*5113495bSYour Name static int qdf_major_allocs_open(struct inode *inode, struct file *file)
715*5113495bSYour Name {
716*5113495bSYour Name 	void *private = inode->i_private;
717*5113495bSYour Name 	struct seq_file *seq;
718*5113495bSYour Name 	int rc;
719*5113495bSYour Name 
720*5113495bSYour Name 	rc = seq_open(file, &qdf_major_allocs_seq_ops);
721*5113495bSYour Name 	if (rc == 0) {
722*5113495bSYour Name 		seq = file->private_data;
723*5113495bSYour Name 		seq->private = private;
724*5113495bSYour Name 	}
725*5113495bSYour Name 	return rc;
726*5113495bSYour Name }
727*5113495bSYour Name 
qdf_major_alloc_set_threshold(struct file * file,const char __user * user_buf,size_t count,loff_t * pos)728*5113495bSYour Name static ssize_t qdf_major_alloc_set_threshold(struct file *file,
729*5113495bSYour Name 					     const char __user *user_buf,
730*5113495bSYour Name 					     size_t count,
731*5113495bSYour Name 					     loff_t *pos)
732*5113495bSYour Name {
733*5113495bSYour Name 	char buf[32];
734*5113495bSYour Name 	ssize_t buf_size;
735*5113495bSYour Name 	uint32_t threshold;
736*5113495bSYour Name 	struct seq_file *seq = file->private_data;
737*5113495bSYour Name 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
738*5113495bSYour Name 
739*5113495bSYour Name 	buf_size = min(count, (sizeof(buf) - 1));
740*5113495bSYour Name 	if (buf_size <= 0)
741*5113495bSYour Name 		return 0;
742*5113495bSYour Name 	if (copy_from_user(buf, user_buf, buf_size))
743*5113495bSYour Name 		return -EFAULT;
744*5113495bSYour Name 	buf[buf_size] = '\0';
745*5113495bSYour Name 	if (!kstrtou32(buf, 10, &threshold))
746*5113495bSYour Name 		priv->threshold = threshold;
747*5113495bSYour Name 	return buf_size;
748*5113495bSYour Name }
749*5113495bSYour Name 
/**
 * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
 * @threshold: the threshold value set by user to list top allocations
 * @print: the print adapter function
 * @print_priv: the private data to be consumed by @print
 * @mem_print: pointer to function which prints the memory allocation data
 *
 * Walks every bucket of the global nbuf tracking table, each under its
 * own lock, aggregates per-callsite statistics into a local table, and
 * flushes the table through @mem_print whenever it fills up plus once
 * at the end for the remainder.
 *
 * Return: None
 */
static void
qdf_print_major_nbuf_allocs(uint32_t threshold,
			    qdf_abstract_print print,
			    void *print_priv,
			    void (*mem_print)(struct __qdf_mem_info *,
					      qdf_abstract_print,
					      void *, uint32_t))
{
	uint32_t nbuf_iter;
	unsigned long irq_flag = 0;
	QDF_NBUF_TRACK *p_node;
	struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
	struct qdf_mem_header meta;
	bool is_full;

	qdf_mem_zero(table, sizeof(table));
	qdf_mem_debug_print_header(print, print_priv, threshold);

	/* no tracking data exists when mem debug was disabled at init */
	if (is_initial_mem_debug_disabled)
		return;

	qdf_rl_info("major nbuf print with threshold %u", threshold);

	for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
	     nbuf_iter++) {
		qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
		p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
		while (p_node) {
			meta.line = p_node->line_num;
			meta.size = p_node->size;
			/* NOTE(review): caller address appears not to be
			 * recorded per nbuf-track node — confirm
			 */
			meta.caller = NULL;
			meta.time = p_node->time;
			qdf_str_lcopy(meta.func, p_node->func_name,
				      QDF_MEM_FUNC_NAME_SIZE);

			is_full = qdf_mem_meta_table_insert(table, &meta);

			/* flush and reset the local table once full so
			 * aggregation can continue for remaining nodes
			 */
			if (is_full) {
				(*mem_print)(table, print,
					     print_priv, threshold);
				qdf_mem_zero(table, sizeof(table));
			}

			p_node = p_node->p_next;
		}
		qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
	}

	/* print whatever remains in the partially filled table */
	(*mem_print)(table, print, print_priv, threshold);

	qdf_rl_info("major nbuf print end");
}
811*5113495bSYour Name 
812*5113495bSYour Name /**
813*5113495bSYour Name  * qdf_major_nbuf_alloc_show() - print sequential callback
814*5113495bSYour Name  * @seq: seq_file handle
815*5113495bSYour Name  * @v: current iterator
816*5113495bSYour Name  *
817*5113495bSYour Name  * Return: 0 - success
818*5113495bSYour Name  */
qdf_major_nbuf_alloc_show(struct seq_file * seq,void * v)819*5113495bSYour Name static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
820*5113495bSYour Name {
821*5113495bSYour Name 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
822*5113495bSYour Name 
823*5113495bSYour Name 	if (!priv) {
824*5113495bSYour Name 		qdf_err("priv is null");
825*5113495bSYour Name 		return -EINVAL;
826*5113495bSYour Name 	}
827*5113495bSYour Name 
828*5113495bSYour Name 	qdf_print_major_nbuf_allocs(priv->threshold,
829*5113495bSYour Name 				    seq_printf_printer,
830*5113495bSYour Name 				    seq,
831*5113495bSYour Name 				    qdf_print_major_alloc);
832*5113495bSYour Name 
833*5113495bSYour Name 	return 0;
834*5113495bSYour Name }
835*5113495bSYour Name 
836*5113495bSYour Name /**
837*5113495bSYour Name  * qdf_nbuf_seq_start() - sequential callback to start
838*5113495bSYour Name  * @seq: seq_file handle
839*5113495bSYour Name  * @pos: The start position of the sequence
840*5113495bSYour Name  *
841*5113495bSYour Name  * Return: iterator pointer, or NULL if iteration is complete
842*5113495bSYour Name  */
qdf_nbuf_seq_start(struct seq_file * seq,loff_t * pos)843*5113495bSYour Name static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
844*5113495bSYour Name {
845*5113495bSYour Name 	enum qdf_debug_domain domain = *pos;
846*5113495bSYour Name 
847*5113495bSYour Name 	if (domain > QDF_DEBUG_NBUF_DOMAIN)
848*5113495bSYour Name 		return NULL;
849*5113495bSYour Name 
850*5113495bSYour Name 	return pos;
851*5113495bSYour Name }
852*5113495bSYour Name 
853*5113495bSYour Name /**
854*5113495bSYour Name  * qdf_nbuf_seq_next() - next sequential callback
855*5113495bSYour Name  * @seq: seq_file handle
856*5113495bSYour Name  * @v: the current iterator
857*5113495bSYour Name  * @pos: the current position
858*5113495bSYour Name  *
859*5113495bSYour Name  * Get the next node and release previous node.
860*5113495bSYour Name  *
861*5113495bSYour Name  * Return: iterator pointer, or NULL if iteration is complete
862*5113495bSYour Name  */
qdf_nbuf_seq_next(struct seq_file * seq,void * v,loff_t * pos)863*5113495bSYour Name static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
864*5113495bSYour Name {
865*5113495bSYour Name 	++*pos;
866*5113495bSYour Name 
867*5113495bSYour Name 	return qdf_nbuf_seq_start(seq, pos);
868*5113495bSYour Name }
869*5113495bSYour Name 
/**
 * qdf_nbuf_seq_stop() - stop sequential callback
 * @seq: seq_file handle
 * @v: current iterator
 *
 * The iterator holds no per-iteration resources, so there is nothing
 * to release here.
 *
 * Return: None
 */
static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
878*5113495bSYour Name 
/* sequential file operation table created to track major skb allocs;
 * uses the single-pseudo-domain nbuf iterator callbacks
 */
static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
	.start = qdf_nbuf_seq_start,
	.next = qdf_nbuf_seq_next,
	.stop = qdf_nbuf_seq_stop,
	.show = qdf_major_nbuf_alloc_show,
};
886*5113495bSYour Name 
qdf_major_nbuf_allocs_open(struct inode * inode,struct file * file)887*5113495bSYour Name static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
888*5113495bSYour Name {
889*5113495bSYour Name 	void *private = inode->i_private;
890*5113495bSYour Name 	struct seq_file *seq;
891*5113495bSYour Name 	int rc;
892*5113495bSYour Name 
893*5113495bSYour Name 	rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
894*5113495bSYour Name 	if (rc == 0) {
895*5113495bSYour Name 		seq = file->private_data;
896*5113495bSYour Name 		seq->private = private;
897*5113495bSYour Name 	}
898*5113495bSYour Name 	return rc;
899*5113495bSYour Name }
900*5113495bSYour Name 
qdf_major_nbuf_alloc_set_threshold(struct file * file,const char __user * user_buf,size_t count,loff_t * pos)901*5113495bSYour Name static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
902*5113495bSYour Name 						  const char __user *user_buf,
903*5113495bSYour Name 						  size_t count,
904*5113495bSYour Name 						  loff_t *pos)
905*5113495bSYour Name {
906*5113495bSYour Name 	char buf[32];
907*5113495bSYour Name 	ssize_t buf_size;
908*5113495bSYour Name 	uint32_t threshold;
909*5113495bSYour Name 	struct seq_file *seq = file->private_data;
910*5113495bSYour Name 	struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
911*5113495bSYour Name 
912*5113495bSYour Name 	buf_size = min(count, (sizeof(buf) - 1));
913*5113495bSYour Name 	if (buf_size <= 0)
914*5113495bSYour Name 		return 0;
915*5113495bSYour Name 	if (copy_from_user(buf, user_buf, buf_size))
916*5113495bSYour Name 		return -EFAULT;
917*5113495bSYour Name 	buf[buf_size] = '\0';
918*5113495bSYour Name 	if (!kstrtou32(buf, 10, &threshold))
919*5113495bSYour Name 		priv->threshold = threshold;
920*5113495bSYour Name 	return buf_size;
921*5113495bSYour Name }
922*5113495bSYour Name 
/* file operation table for the major_mem_allocs / major_dma_allocs
 * nodes; a write updates the node's listing threshold
 */
static const struct file_operations fops_qdf_major_allocs = {
	.owner = THIS_MODULE,
	.open = qdf_major_allocs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = qdf_major_alloc_set_threshold,
};

/* debugfs file operation table for the read-only "list" node */
static const struct file_operations fops_qdf_mem_debugfs = {
	.owner = THIS_MODULE,
	.open = qdf_mem_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* file operation table for the major_nbuf_allocs node; a write
 * updates the node's listing threshold
 */
static const struct file_operations fops_qdf_nbuf_major_allocs = {
	.owner = THIS_MODULE,
	.open = qdf_major_nbuf_allocs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = qdf_major_nbuf_alloc_set_threshold,
};
951*5113495bSYour Name 
952*5113495bSYour Name static struct major_alloc_priv mem_priv = {
953*5113495bSYour Name 	/* List type set to mem */
954*5113495bSYour Name 	LIST_TYPE_MEM,
955*5113495bSYour Name 	/* initial threshold to list APIs which allocates mem >= 50 times */
956*5113495bSYour Name 	50
957*5113495bSYour Name };
958*5113495bSYour Name 
959*5113495bSYour Name static struct major_alloc_priv dma_priv = {
960*5113495bSYour Name 	/* List type set to DMA */
961*5113495bSYour Name 	LIST_TYPE_DMA,
962*5113495bSYour Name 	/* initial threshold to list APIs which allocates dma >= 50 times */
963*5113495bSYour Name 	50
964*5113495bSYour Name };
965*5113495bSYour Name 
966*5113495bSYour Name static struct major_alloc_priv nbuf_priv = {
967*5113495bSYour Name 	/* List type set to NBUF */
968*5113495bSYour Name 	LIST_TYPE_NBUF,
969*5113495bSYour Name 	/* initial threshold to list APIs which allocates nbuf >= 50 times */
970*5113495bSYour Name 	50
971*5113495bSYour Name };
972*5113495bSYour Name 
/**
 * qdf_mem_debug_debugfs_init() - create the memory-debug debugfs nodes
 * ("list", "major_mem_allocs", "major_dma_allocs", "major_nbuf_allocs")
 * under the "mem" debugfs directory
 *
 * Return: QDF_STATUS_SUCCESS on success (or when mem debug is disabled),
 *	   QDF_STATUS_E_FAILURE when the parent directory is missing
 */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	/* nothing to expose when mem debug was disabled at init time */
	if (is_initial_mem_debug_disabled)
		return QDF_STATUS_SUCCESS;

	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	/* read-only full listing of active allocations */
	debugfs_create_file("list",
			    S_IRUSR,
			    qdf_mem_debugfs_root,
			    NULL,
			    &fops_qdf_mem_debugfs);

	/* 0600 nodes: readable listing, writable threshold */
	debugfs_create_file("major_mem_allocs",
			    0600,
			    qdf_mem_debugfs_root,
			    &mem_priv,
			    &fops_qdf_major_allocs);

	debugfs_create_file("major_dma_allocs",
			    0600,
			    qdf_mem_debugfs_root,
			    &dma_priv,
			    &fops_qdf_major_allocs);

	debugfs_create_file("major_nbuf_allocs",
			    0600,
			    qdf_mem_debugfs_root,
			    &nbuf_priv,
			    &fops_qdf_nbuf_major_allocs);

	return QDF_STATUS_SUCCESS;
}
1007*5113495bSYour Name 
/**
 * qdf_mem_debug_debugfs_exit() - tear down the memory-debug debugfs nodes
 *
 * The nodes are removed recursively by qdf_mem_debugfs_exit(), so
 * nothing needs to be undone here.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_SUCCESS;
}
1012*5113495bSYour Name 
1013*5113495bSYour Name #else /* MEMORY_DEBUG */
1014*5113495bSYour Name 
/* stub: memory-debug debugfs nodes are unavailable without MEMORY_DEBUG */
static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* stub: nothing was created, so nothing to tear down */
static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1024*5113495bSYour Name 
1025*5113495bSYour Name #endif /* MEMORY_DEBUG */
1026*5113495bSYour Name 
1027*5113495bSYour Name 
/**
 * qdf_mem_debugfs_exit() - remove the "mem" debugfs directory and all
 * nodes beneath it, then clear the cached root pointer
 *
 * Return: None
 */
static void qdf_mem_debugfs_exit(void)
{
	debugfs_remove_recursive(qdf_mem_debugfs_root);
	qdf_mem_debugfs_root = NULL;
}
1033*5113495bSYour Name 
/**
 * qdf_mem_debugfs_init() - create the "mem" debugfs directory with
 * read-only atomic counters for kmalloc, dma and skb usage
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when the
 *	   qdf debugfs root or the "mem" directory cannot be obtained
 */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();

	if (!qdf_debugfs_root)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);

	/* NOTE(review): on recent kernels debugfs_create_dir() returns an
	 * ERR_PTR rather than NULL on failure, so this NULL check may not
	 * catch errors there — confirm against the supported kernels
	 */
	if (!qdf_mem_debugfs_root)
		return QDF_STATUS_E_FAILURE;


	debugfs_create_atomic_t("kmalloc",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.kmalloc);

	debugfs_create_atomic_t("dma",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.dma);

	debugfs_create_atomic_t("skb",
				S_IRUSR,
				qdf_mem_debugfs_root,
				&qdf_mem_stat.skb);

	return QDF_STATUS_SUCCESS;
}
1064*5113495bSYour Name 
1065*5113495bSYour Name #else /* WLAN_DEBUGFS */
1066*5113495bSYour Name 
/* stubs: no debugfs support compiled in, so every init reports
 * non-support and exit is a no-op
 */
static QDF_STATUS qdf_mem_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
static void qdf_mem_debugfs_exit(void) {}


static QDF_STATUS qdf_mem_debug_debugfs_init(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
{
	return QDF_STATUS_E_NOSUPPORT;
}
1083*5113495bSYour Name 
1084*5113495bSYour Name #endif /* WLAN_DEBUGFS */
1085*5113495bSYour Name 
/**
 * qdf_mem_kmalloc_inc() - add @size bytes to the kmalloc usage counter
 * @size: number of bytes allocated
 */
void qdf_mem_kmalloc_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
}

/* add @size bytes to the DMA usage counter */
static void qdf_mem_dma_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.dma);
}
1095*5113495bSYour Name 
#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/* skb / DP-skb usage accounting exposed via sysfs.
 *
 * NOTE(review): the *_max high-water updates below are read-after-add
 * and not atomic as a whole; concurrent updaters may miss a peak.
 * This appears tolerated for statistics — confirm.
 */

/* add @size bytes to the skb usage counter */
void qdf_mem_skb_inc(qdf_size_t size)
{
	qdf_atomic_add(size, &qdf_mem_stat.skb);
}

/* subtract @size bytes from the skb usage counter */
void qdf_mem_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb);
}

/* add @size to the total skb counter and track its high-water mark */
void qdf_mem_skb_total_inc(qdf_size_t size)
{
	int32_t skb_mem_max = 0;

	qdf_atomic_add(size, &qdf_mem_stat.skb_total);
	skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
	if (qdf_mem_stat.skb_mem_max < skb_mem_max)
		qdf_mem_stat.skb_mem_max = skb_mem_max;
}

/* subtract @size bytes from the total skb counter */
void qdf_mem_skb_total_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
}

/* add @size to the DP TX skb counter and track its high-water mark */
void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
{
	int32_t curr_dp_tx_skb_mem_max = 0;

	qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
	curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
	if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
		qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
}

/* subtract @size bytes from the DP TX skb counter */
void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
}

/* add @size to the DP RX skb counter and track its high-water mark */
void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
{
	int32_t curr_dp_rx_skb_mem_max = 0;

	qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
	curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
	if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
		qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
}

/* subtract @size bytes from the DP RX skb counter */
void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
}

/* bump the DP TX skb count and track its high-water mark */
void qdf_mem_dp_tx_skb_cnt_inc(void)
{
	int32_t curr_dp_tx_skb_count_max = 0;

	qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
	curr_dp_tx_skb_count_max =
		qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
	if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
		qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
}

/* drop the DP TX skb count */
void qdf_mem_dp_tx_skb_cnt_dec(void)
{
	qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
}

/* bump the DP RX skb count and track its high-water mark */
void qdf_mem_dp_rx_skb_cnt_inc(void)
{
	int32_t curr_dp_rx_skb_count_max = 0;

	qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
	curr_dp_rx_skb_count_max =
		qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
	if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
		qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
}

/* drop the DP RX skb count */
void qdf_mem_dp_rx_skb_cnt_dec(void)
{
	qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
}
#endif
1184*5113495bSYour Name 
/**
 * qdf_mem_kmalloc_dec() - subtract @size bytes from the kmalloc usage
 * counter
 * @size: number of bytes freed
 */
void qdf_mem_kmalloc_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
}

/* subtract @size bytes from the DMA usage counter */
static inline void qdf_mem_dma_dec(qdf_size_t size)
{
	qdf_atomic_sub(size, &qdf_mem_stat.dma);
}
1194*5113495bSYour Name 
/**
 * __qdf_mempool_init() - create and initialize a memory pool
 * @osdev: platform device object owning the pool slots
 * @pool_addr: address of a pool handle to populate on success
 * @elem_cnt: number of elements the pool must hold
 * @elem_size: size in bytes of each pool element
 * @flags: pool creation flags (stored; zeroing semantics TBD)
 *
 * When pre-allocation is disabled only a bookkeeping context is
 * created; otherwise backing storage for all elements is allocated up
 * front, aligned to the L1 cache line size and threaded onto a free
 * list.
 *
 * Return: 0 on success, -ENOMEM when no pool slot or memory is
 *	   available
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
		       int elem_cnt, size_t elem_size, u_int32_t flags)
{
	__qdf_mempool_ctxt_t *new_pool = NULL;
	u_int32_t align = L1_CACHE_BYTES;
	unsigned long aligned_pool_mem;
	int pool_id;
	int i;

	if (prealloc_disabled) {
		/* TBD: We can maintain a list of pools in qdf_device_t
		 * to help debugging
		 * when pre-allocation is not enabled
		 */
		new_pool = kzalloc(sizeof(*new_pool), GFP_KERNEL);
		if (!new_pool)
			/* -ENOMEM for consistency with the other
			 * failure paths of this function
			 */
			return -ENOMEM;

		/* TBD: define flags for zeroing buffers etc */
		new_pool->flags = flags;
		new_pool->elem_size = elem_size;
		new_pool->max_elem = elem_cnt;
		*pool_addr = new_pool;
		return 0;
	}

	/* claim the first unused pool slot on this device */
	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
		if (!osdev->mem_pool[pool_id])
			break;
	}

	if (pool_id == MAX_MEM_POOLS)
		return -ENOMEM;

	new_pool = kzalloc(sizeof(*new_pool), GFP_KERNEL);
	if (!new_pool)
		return -ENOMEM;

	osdev->mem_pool[pool_id] = new_pool;

	/* TBD: define flags for zeroing buffers etc */
	new_pool->flags = flags;
	new_pool->pool_id = pool_id;

	/* Round up the element size to cacheline */
	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
	/* reserve align-1 bytes of slack so the base can be realigned */
	new_pool->mem_size = elem_cnt * new_pool->elem_size +
				((align) ? (align - 1) : 0);

	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
	if (!new_pool->pool_mem) {
			/* TBD: Check if we need get_free_pages above */
		kfree(new_pool);
		osdev->mem_pool[pool_id] = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&new_pool->lock);

	/* Initialize free list. Round the base address UP to the next
	 * alignment boundary; the previous code added (addr % align),
	 * which does not yield an aligned address.
	 */
	aligned_pool_mem = (unsigned long)new_pool->pool_mem;
	if (align)
		aligned_pool_mem = roundup(aligned_pool_mem, align);
	STAILQ_INIT(&new_pool->free_list);

	for (i = 0; i < elem_cnt; i++)
		STAILQ_INSERT_TAIL(&(new_pool->free_list),
			(mempool_elem_t *)(aligned_pool_mem +
			(new_pool->elem_size * i)), mempool_entry);


	new_pool->free_cnt = elem_cnt;
	*pool_addr = new_pool;
	return 0;
}
qdf_export_symbol(__qdf_mempool_init);
1272*5113495bSYour Name 
/**
 * __qdf_mempool_destroy() - tear down a memory pool created by
 * __qdf_mempool_init()
 * @osdev: platform device handle owning the mem_pool slot table
 * @pool: pool context to destroy (NULL is a safe no-op)
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
{
	if (!pool)
		return;

	/* With pre-allocation disabled only the context itself exists */
	if (prealloc_disabled) {
		kfree(pool);
		return;
	}

	/* TBD: Check if free count matches elem_cnt if debug is enabled */
	osdev->mem_pool[pool->pool_id] = NULL;
	kfree(pool->pool_mem);
	kfree(pool);
}
1292*5113495bSYour Name qdf_export_symbol(__qdf_mempool_destroy);
1293*5113495bSYour Name 
/**
 * __qdf_mempool_alloc() - take one element from a memory pool
 * @osdev: platform device handle (not used by the pre-allocated path)
 * @pool: pool to allocate from
 *
 * Return: a free element, or NULL if @pool is NULL or exhausted
 */
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
{
	void *elem = NULL;

	if (!pool)
		return NULL;

	/* No backing pool: fall back to a heap allocation of one element */
	if (prealloc_disabled)
		return qdf_mem_malloc(pool->elem_size);

	spin_lock_bh(&pool->lock);

	elem = STAILQ_FIRST(&pool->free_list);
	if (elem) {
		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
		pool->free_cnt--;
	}

	/* TBD: Update free count if debug is enabled */
	spin_unlock_bh(&pool->lock);

	return elem;
}
1317*5113495bSYour Name qdf_export_symbol(__qdf_mempool_alloc);
1318*5113495bSYour Name 
/**
 * __qdf_mempool_free() - return an element to its memory pool
 * @osdev: platform device handle (not used by the pre-allocated path)
 * @pool: pool the element belongs to (NULL is a safe no-op)
 * @buf: element previously obtained from __qdf_mempool_alloc()
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
{
	if (!pool)
		return;

	/* Heap-backed element: release it straight back to the allocator */
	if (prealloc_disabled) {
		qdf_mem_free(buf);
		return;
	}

	spin_lock_bh(&pool->lock);
	STAILQ_INSERT_TAIL(&pool->free_list, (mempool_elem_t *)buf,
			   mempool_entry);
	pool->free_cnt++;
	spin_unlock_bh(&pool->lock);
}
1335*5113495bSYour Name qdf_export_symbol(__qdf_mempool_free);
1336*5113495bSYour Name 
1337*5113495bSYour Name #ifdef CNSS_MEM_PRE_ALLOC
qdf_might_be_prealloc(void * ptr)1338*5113495bSYour Name static bool qdf_might_be_prealloc(void *ptr)
1339*5113495bSYour Name {
1340*5113495bSYour Name 	if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1341*5113495bSYour Name 		return true;
1342*5113495bSYour Name 	else
1343*5113495bSYour Name 		return false;
1344*5113495bSYour Name }
1345*5113495bSYour Name 
1346*5113495bSYour Name /**
1347*5113495bSYour Name  * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1348*5113495bSYour Name  * @size: the number of bytes to allocate
1349*5113495bSYour Name  *
 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
 * a chunk of pre-allocated memory. If size is less than or equal to
 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1353*5113495bSYour Name  *
1354*5113495bSYour Name  * Return: NULL on failure, non-NULL on success
1355*5113495bSYour Name  */
static void *qdf_mem_prealloc_get(size_t size)
{
	void *mem;

	/* Small requests are cheaper to serve from the regular allocator */
	if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
		return NULL;

	mem = wcnss_prealloc_get(size);

	/* Prealloc buffers are recycled; scrub any stale contents */
	if (mem)
		memset(mem, 0, size);

	return mem;
}
1371*5113495bSYour Name 
/* Offer @ptr back to the CNSS pre-allocation pool.
 * Return: presumably true when the pool reclaimed the buffer, false when
 * @ptr did not belong to it -- NOTE(review): confirm against the
 * wcnss_prealloc_put() contract in the platform driver.
 */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return wcnss_prealloc_put(ptr);
}
1376*5113495bSYour Name #else
/* CNSS pre-allocation not compiled in: no pointer can be a prealloc buffer.
 * Marked inline for consistency with the sibling stubs below.
 */
static inline bool qdf_might_be_prealloc(void *ptr)
{
	return false;
}
1381*5113495bSYour Name 
/* CNSS pre-allocation not compiled in: never hand out a prealloc buffer */
static inline void *qdf_mem_prealloc_get(size_t size)
{
	return NULL;
}
1386*5113495bSYour Name 
/* CNSS pre-allocation not compiled in: nothing to reclaim, caller must
 * free @ptr through the normal path.
 */
static inline bool qdf_mem_prealloc_put(void *ptr)
{
	return false;
}
1391*5113495bSYour Name #endif /* CNSS_MEM_PRE_ALLOC */
1392*5113495bSYour Name 
1393*5113495bSYour Name /* External Function implementation */
1394*5113495bSYour Name #ifdef MEMORY_DEBUG
1395*5113495bSYour Name #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
/* Build-time override: report "not disabled" regardless of configuration */
bool qdf_mem_debug_config_get(void)
{
	/* Return false if DISABLE_LOAD_MEM_DBG_CONFIG flag is enabled */
	return false;
}
1401*5113495bSYour Name #else
/* Report the externally configured mem_debug_disabled setting */
bool qdf_mem_debug_config_get(void)
{
	return mem_debug_disabled;
}
1406*5113495bSYour Name #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1407*5113495bSYour Name 
1408*5113495bSYour Name #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
qdf_mem_debug_disabled_config_set(const char * str_value)1409*5113495bSYour Name QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1410*5113495bSYour Name {
1411*5113495bSYour Name 	QDF_STATUS status;
1412*5113495bSYour Name 
1413*5113495bSYour Name 	status = qdf_bool_parse(str_value, &mem_debug_disabled);
1414*5113495bSYour Name 	return status;
1415*5113495bSYour Name }
1416*5113495bSYour Name #endif
1417*5113495bSYour Name 
1418*5113495bSYour Name /**
1419*5113495bSYour Name  * qdf_mem_debug_init() - initialize qdf memory debug functionality
1420*5113495bSYour Name  *
1421*5113495bSYour Name  * Return: none
1422*5113495bSYour Name  */
static void qdf_mem_debug_init(void)
{
	int i;

	/* Latch the configured setting once; later config changes are
	 * intentionally ignored for the driver's lifetime.
	 */
	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();

	if (is_initial_mem_debug_disabled)
		return;

	/* Initializing the list with maximum size of 60000 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_domains[i], 60000);
	qdf_spinlock_create(&qdf_mem_list_lock);

	/* dma */
	/* max size 0 -- presumably means "unbounded"; confirm against
	 * qdf_list_create() semantics.
	 */
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_create(&qdf_mem_dma_domains[i], 0);
	qdf_spinlock_create(&qdf_mem_dma_list_lock);
}
1442*5113495bSYour Name 
1443*5113495bSYour Name static uint32_t
qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,qdf_list_t * mem_list)1444*5113495bSYour Name qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1445*5113495bSYour Name 			       qdf_list_t *mem_list)
1446*5113495bSYour Name {
1447*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
1448*5113495bSYour Name 		return 0;
1449*5113495bSYour Name 
1450*5113495bSYour Name 	if (qdf_list_empty(mem_list))
1451*5113495bSYour Name 		return 0;
1452*5113495bSYour Name 
1453*5113495bSYour Name 	qdf_err("Memory leaks detected in %s domain!",
1454*5113495bSYour Name 		qdf_debug_domain_name(domain));
1455*5113495bSYour Name 	qdf_mem_domain_print(mem_list,
1456*5113495bSYour Name 			     qdf_err_printer,
1457*5113495bSYour Name 			     NULL,
1458*5113495bSYour Name 			     0,
1459*5113495bSYour Name 			     qdf_mem_meta_table_print);
1460*5113495bSYour Name 
1461*5113495bSYour Name 	return mem_list->count;
1462*5113495bSYour Name }
1463*5113495bSYour Name 
/* Scan every debug domain in @domains for leaks; panic if any are found */
static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
{
	uint32_t total_leaks = 0;
	int domain_idx;

	if (is_initial_mem_debug_disabled)
		return;

	/* detect and print leaks */
	for (domain_idx = 0; domain_idx < QDF_DEBUG_DOMAIN_COUNT; domain_idx++)
		total_leaks += qdf_mem_domain_check_for_leaks(
					domain_idx, &domains[domain_idx]);

	if (total_leaks)
		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
				   total_leaks);
}
1480*5113495bSYour Name 
1481*5113495bSYour Name /**
1482*5113495bSYour Name  * qdf_mem_debug_exit() - exit qdf memory debug functionality
1483*5113495bSYour Name  *
1484*5113495bSYour Name  * Return: none
1485*5113495bSYour Name  */
static void qdf_mem_debug_exit(void)
{
	int i;

	if (is_initial_mem_debug_disabled)
		return;

	/* mem */
	/* Leak check first (it may panic), then destroy the tracking
	 * lists and finally the lock that guarded them.
	 */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(qdf_mem_list_get(i));

	qdf_spinlock_destroy(&qdf_mem_list_lock);

	/* dma */
	qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
	for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
		qdf_list_destroy(&qdf_mem_dma_domains[i]);
	qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
}
1506*5113495bSYour Name 
/**
 * qdf_mem_malloc_debug() - tracked memory allocation
 * @size: number of bytes requested by the caller
 * @func: calling function name, recorded for leak reports
 * @line: call-site line number, recorded for leak reports
 * @caller: caller return address, recorded for leak reports
 * @flag: GFP flags to use; 0 selects qdf_mem_malloc_flags()
 *
 * Allocates @size bytes plus a debug header and trailer, links the
 * allocation into the current debug domain's tracking list, and returns
 * a pointer to the caller-usable region after the header.
 *
 * Return: zero-initialized buffer of @size bytes, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
			   void *caller, uint32_t flag)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	/* Tracking disabled at init time: use the plain allocator */
	if (is_initial_mem_debug_disabled)
		return __qdf_mem_malloc(size, func, line);

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	/* Large requests may be served from the platform prealloc pool;
	 * note such buffers carry no debug header and are not tracked.
	 */
	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	if (!flag)
		flag = qdf_mem_malloc_flags();

	/* Time the allocation to flag call sites that sleep too long */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, func, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, func, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	/* Insert failure is logged but not fatal: the buffer is still
	 * returned, it just will not appear in leak reports.
	 */
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(ksize(header));

	return ptr;
}
1559*5113495bSYour Name qdf_export_symbol(qdf_mem_malloc_debug);
1560*5113495bSYour Name 
/**
 * qdf_mem_malloc_atomic_debug() - tracked allocation safe in atomic context
 * @size: number of bytes requested by the caller
 * @func: calling function name, recorded for leak reports
 * @line: call-site line number, recorded for leak reports
 * @caller: caller return address, recorded for leak reports
 *
 * Same tracking scheme as qdf_mem_malloc_debug() but always allocates
 * with GFP_ATOMIC so it can be called where sleeping is forbidden.
 *
 * Return: zero-initialized buffer of @size bytes, or NULL on failure
 */
void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
				  uint32_t line, void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
	struct qdf_mem_header *header;
	void *ptr;
	unsigned long start, duration;

	/* Tracking disabled: fall through to the untracked atomic path */
	if (is_initial_mem_debug_disabled)
		return qdf_mem_malloc_atomic_debug_fl(size, func, line);

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	ptr = qdf_mem_prealloc_get(size);
	if (ptr)
		return ptr;

	/* NOTE(review): timing check and "Malloc slept" text mirror the
	 * non-atomic variant; GFP_ATOMIC should not sleep, so a trigger
	 * here would indicate scheduling delay rather than a sleep.
	 */
	start = qdf_mc_timer_get_system_time();
	header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
	duration = qdf_mc_timer_get_system_time() - start;

	if (duration > QDF_MEM_WARN_THRESHOLD)
		qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
			 duration, size, func, line);

	if (!header) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	qdf_mem_header_init(header, size, func, line, caller);
	qdf_mem_trailer_init(header);
	ptr = qdf_mem_get_ptr(header);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	qdf_mem_kmalloc_inc(ksize(header));

	return ptr;
}
1610*5113495bSYour Name 
1611*5113495bSYour Name qdf_export_symbol(qdf_mem_malloc_atomic_debug);
1612*5113495bSYour Name 
/**
 * qdf_mem_malloc_atomic_debug_fl() - untracked atomic allocation with
 * file/line diagnostics
 * @size: number of bytes to allocate
 * @func: calling function name, used only in diagnostics
 * @line: call-site line number, used only in diagnostics
 *
 * Return: zero-initialized buffer of @size bytes, or NULL on failure
 */
void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
				     uint32_t line)
{
	void *mem;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
			     line);
		return NULL;
	}

	/* Large requests may come straight from the prealloc pool */
	mem = qdf_mem_prealloc_get(size);
	if (mem)
		return mem;

	mem = kzalloc(size, GFP_ATOMIC);
	if (!mem) {
		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
			      size, func, line);
		return NULL;
	}

	/* Account the true underlying allocation size */
	qdf_mem_kmalloc_inc(ksize(mem));

	return mem;
}
1639*5113495bSYour Name 
1640*5113495bSYour Name qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
1641*5113495bSYour Name 
/**
 * qdf_mem_free_debug() - tracked free, counterpart of qdf_mem_malloc_debug()
 * @ptr: buffer previously returned by a qdf_mem_malloc*_debug API
 * @func: calling function name, used in diagnostics
 * @line: call-site line number, used in diagnostics
 *
 * Validates the allocation's debug header and trailer, unlinks it from
 * the tracking list, and releases the memory. Freeing NULL is a no-op.
 */
void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
{
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	/* Tracking disabled: there is no header, use the plain free */
	if (is_initial_mem_debug_disabled) {
		__qdf_mem_free(ptr);
		return;
	}

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!ptr))
		return;

	/* Buffers owned by the platform prealloc pool go back to it */
	if (qdf_mem_prealloc_put(ptr))
		return;

	/* An address this low cannot have a valid header in front of it */
	if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
		QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
				   ptr);

	qdf_talloc_assert_no_children_fl(ptr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_list_lock);
	header = qdf_mem_get_header(ptr);
	/* Validate header and trailer under the lock; names suggest this
	 * catches double-free, domain mismatch and buffer overruns.
	 */
	error_bitmap = qdf_mem_header_validate(header, current_domain);
	error_bitmap |= qdf_mem_trailer_validate(header);

	if (!error_bitmap) {
		header->freed = true;
		qdf_list_remove_node(qdf_mem_list_get(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);

	/* Asserts (outside the lock) if validation flagged any error */
	qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
				    func, line);

	qdf_mem_kmalloc_dec(ksize(header));
	kfree(header);
}
1684*5113495bSYour Name qdf_export_symbol(qdf_mem_free_debug);
1685*5113495bSYour Name 
qdf_mem_check_for_leaks(void)1686*5113495bSYour Name void qdf_mem_check_for_leaks(void)
1687*5113495bSYour Name {
1688*5113495bSYour Name 	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1689*5113495bSYour Name 	qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1690*5113495bSYour Name 	qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1691*5113495bSYour Name 	uint32_t leaks_count = 0;
1692*5113495bSYour Name 
1693*5113495bSYour Name 	if (is_initial_mem_debug_disabled)
1694*5113495bSYour Name 		return;
1695*5113495bSYour Name 
1696*5113495bSYour Name 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1697*5113495bSYour Name 	leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1698*5113495bSYour Name 
1699*5113495bSYour Name 	if (leaks_count)
1700*5113495bSYour Name 		QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1701*5113495bSYour Name 				   leaks_count);
1702*5113495bSYour Name }
1703*5113495bSYour Name 
/**
 * qdf_mem_multi_pages_alloc_debug() - allocate a large buffer as a set
 * of pages, with debug tracking
 * @osdev: platform device handle (needed for the DMA-coherent path)
 * @pages: descriptor filled in with the allocated page set
 * @element_size: size of one element; elements never straddle pages
 * @element_num: total number of elements to provide room for
 * @memctxt: DMA context passed through to consistent alloc/free
 * @cacheable: true for plain kernel memory, false for DMA-coherent pages
 * @func: calling function name, recorded for leak reports
 * @line: call-site line number, recorded for leak reports
 * @caller: caller return address, recorded for leak reports
 *
 * On failure all partial allocations are released and @pages is zeroed
 * (cacheable_pages/dma_pages NULL, num_pages 0); callers detect failure
 * by checking those fields.
 */
void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
				     struct qdf_mem_multi_page_t *pages,
				     size_t element_size, uint32_t element_num,
				     qdf_dma_context_t memctxt, bool cacheable,
				     const char *func, uint32_t line,
				     void *caller)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	if (!pages->page_size)
		pages->page_size = qdf_page_size;

	pages->num_element_per_page = pages->page_size / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)pages->page_size, (int)element_size);
		goto out_fail;
	}

	/* Round the page count up so every element fits */
	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage.
		 * Fix: size the array by the element type (void *), not by
		 * sizeof the array pointer itself -- the old expression only
		 * worked because void ** and void * happen to be the same
		 * size.
		 */
		pages->cacheable_pages = qdf_mem_malloc_debug(
			pages->num_pages * sizeof(pages->cacheable_pages[0]),
			func, line, caller, 0);
		if (!pages->cacheable_pages)
			goto out_fail;

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = qdf_mem_malloc_debug(
				pages->page_size, func, line, caller, 0);
			if (!cacheable_pages[page_idx])
				goto page_alloc_fail;
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc_debug(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
			func, line, caller, 0);
		if (!pages->dma_pages)
			goto out_fail;

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent_debug(
					osdev, osdev->dev, pages->page_size,
					&dma_pages->page_p_addr,
					func, line, caller);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + pages->page_size;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	/* Unwind only the pages allocated before the failure (page_idx) */
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free_debug(pages->cacheable_pages[i],
					   func, line);
		qdf_mem_free_debug(pages->cacheable_pages, func, line);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent_debug(
				osdev, osdev->dev,
				pages->page_size, dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt, func, line);
			dma_pages++;
		}
		qdf_mem_free_debug(pages->dma_pages, func, line);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
1796*5113495bSYour Name 
1797*5113495bSYour Name qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1798*5113495bSYour Name 
/**
 * qdf_mem_multi_pages_free_debug() - free a page set allocated by
 * qdf_mem_multi_pages_alloc_debug()
 * @osdev: platform device handle (needed for the DMA-coherent path)
 * @pages: descriptor of the page set; reset to empty on return
 * @memctxt: DMA context passed through to consistent free
 * @cacheable: must match the value used at allocation time
 * @func: calling function name, used in diagnostics
 * @line: call-site line number, used in diagnostics
 */
void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
				    struct qdf_mem_multi_page_t *pages,
				    qdf_dma_context_t memctxt, bool cacheable,
				    const char *func, uint32_t line)
{
	unsigned int idx;
	struct qdf_mem_dma_page_t *dpage;

	if (!pages->page_size)
		pages->page_size = qdf_page_size;

	if (cacheable) {
		/* Release each page, then the page-pointer array */
		for (idx = 0; idx < pages->num_pages; idx++)
			qdf_mem_free_debug(pages->cacheable_pages[idx],
					   func, line);
		qdf_mem_free_debug(pages->cacheable_pages, func, line);
	} else {
		/* Release each DMA page, then the descriptor array */
		for (idx = 0, dpage = pages->dma_pages;
		     idx < pages->num_pages; idx++, dpage++)
			qdf_mem_free_consistent_debug(
				osdev, osdev->dev, pages->page_size,
				dpage->page_v_addr_start,
				dpage->page_p_addr, memctxt, func, line);
		qdf_mem_free_debug(pages->dma_pages, func, line);
	}

	/* Leave the descriptor safe for a subsequent re-allocation */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
1831*5113495bSYour Name 
1832*5113495bSYour Name qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1833*5113495bSYour Name 
1834*5113495bSYour Name #else
/* MEMORY_DEBUG off: no tracking state to set up */
static void qdf_mem_debug_init(void) {}
1836*5113495bSYour Name 
/* MEMORY_DEBUG off: no tracking state to tear down */
static void qdf_mem_debug_exit(void) {}
1838*5113495bSYour Name 
/**
 * qdf_mem_malloc_atomic_fl() - atomic-context allocation with file/line
 * diagnostics (non-debug build)
 * @size: number of bytes to allocate
 * @func: calling function name, used only in diagnostics
 * @line: call-site line number, used only in diagnostics
 *
 * Return: zero-initialized buffer of @size bytes, or NULL on failure
 */
void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
{
	void *mem;

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
			     line);
		return NULL;
	}

	/* Large requests may be served by the platform prealloc pool */
	mem = qdf_mem_prealloc_get(size);
	if (mem)
		return mem;

	mem = kzalloc(size, GFP_ATOMIC);
	if (!mem) {
		qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
			      size, func, line);
		return NULL;
	}

	/* Account the true underlying allocation size */
	qdf_mem_kmalloc_inc(ksize(mem));

	return mem;
}
1864*5113495bSYour Name qdf_export_symbol(qdf_mem_malloc_atomic_fl);
1865*5113495bSYour Name 
1866*5113495bSYour Name #ifndef ALLOC_CONTIGUOUS_MULTI_PAGE
/**
 * qdf_mem_multi_pages_alloc() - allocate a pool of element_num elements
 * spread across individually-allocated pages
 * @osdev: OS device handle (dev member used for DMA allocations)
 * @pages: in/out page descriptor; page_size may be preset by the caller,
 *         otherwise defaults to qdf_page_size
 * @element_size: size in bytes of one pool element
 * @element_num: total number of elements required
 * @memctxt: DMA context passed through to the consistent allocator
 * @cacheable: true => plain kernel memory pages; false => DMA-coherent pages
 *
 * On success either pages->cacheable_pages or pages->dma_pages is populated
 * (the other is NULL). On any failure all partially-allocated pages are
 * released and pages->num_pages is reset to 0, so the caller can detect
 * failure by checking num_pages / the page arrays.
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint32_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	if (!pages->page_size)
		pages->page_size = qdf_page_size;

	/* Elements never straddle a page boundary */
	pages->num_element_per_page = pages->page_size / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)pages->page_size, (int)element_size);
		goto out_fail;
	}

	/* Round page count up so a partial last page still fits */
	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages)
			goto out_fail;

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] =
				qdf_mem_malloc(pages->page_size);
			if (!cacheable_pages[page_idx])
				goto page_alloc_fail;
		}
		pages->dma_pages = NULL;
	} else {
		/* Per-page descriptor array, then one coherent buffer each */
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages)
			goto out_fail;

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
					 pages->page_size,
					&dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + pages->page_size;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	/* page_idx is the index of the page that FAILED: free [0, page_idx) */
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(
				osdev, osdev->dev, pages->page_size,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	/* Leave the descriptor in a state the caller can test for failure */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
1954*5113495bSYour Name #else
/**
 * qdf_mem_multi_pages_alloc() - allocate a pool of element_num elements,
 * preferring one contiguous DMA allocation (ALLOC_CONTIGUOUS_MULTI_PAGE)
 * @osdev: OS device handle (dev member used for DMA allocations)
 * @pages: in/out page descriptor; page_size may be preset by the caller,
 *         otherwise defaults to qdf_page_size
 * @element_size: size in bytes of one pool element
 * @element_num: total number of elements required
 * @memctxt: DMA context passed through to the consistent allocator
 * @cacheable: true => plain kernel memory pages; false => DMA-coherent pages
 *
 * For the non-cacheable case this variant first tries a single coherent
 * allocation covering all pages and carves per-page descriptors out of it
 * (pages->contiguous_dma_pages = true). If that large allocation fails it
 * falls back to the per-page scheme of the non-contiguous variant.
 * On failure everything is released and pages->num_pages is reset to 0.
 */
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
			       struct qdf_mem_multi_page_t *pages,
			       size_t element_size, uint32_t element_num,
			       qdf_dma_context_t memctxt, bool cacheable)
{
	uint16_t page_idx;
	struct qdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;
	/* Stack-local descriptor for the single big contiguous allocation */
	struct qdf_mem_dma_page_t temp_dma_pages;
	struct qdf_mem_dma_page_t *total_dma_pages = &temp_dma_pages;
	qdf_size_t total_size = 0;

	pages->contiguous_dma_pages = false;

	if (!pages->page_size)
		pages->page_size = qdf_page_size;

	/* Elements never straddle a page boundary */
	pages->num_element_per_page = pages->page_size / element_size;
	if (!pages->num_element_per_page) {
		qdf_print("Invalid page %d or element size %d",
			  (int)pages->page_size, (int)element_size);
		goto out_fail;
	}

	/* Round page count up so a partial last page still fits */
	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages)
			goto out_fail;

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] =
				qdf_mem_malloc(pages->page_size);
			if (!cacheable_pages[page_idx])
				goto page_alloc_fail;
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = qdf_mem_malloc(
			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
		if (!pages->dma_pages)
			goto out_fail;

		dma_pages = pages->dma_pages;
		/* First try: one coherent buffer covering every page */
		total_size = pages->page_size * pages->num_pages;
		total_dma_pages->page_v_addr_start =
			qdf_mem_alloc_consistent(osdev, osdev->dev,
						 total_size,
						 &total_dma_pages->page_p_addr);
		total_dma_pages->page_v_addr_end =
			total_dma_pages->page_v_addr_start + total_size;
		if (!total_dma_pages->page_v_addr_start) {
			qdf_print("mem allocate fail, total_size: %zu",
				  total_size);
			/* Large allocation failed: fall back to per-page */
			goto page_alloc_default;
		}

		pages->contiguous_dma_pages = true;
		/* Carve per-page virtual/physical addresses out of the
		 * single contiguous buffer
		 */
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				total_dma_pages->page_v_addr_start +
				(pages->page_size * page_idx);
			dma_pages->page_p_addr =
				total_dma_pages->page_p_addr +
				(pages->page_size * page_idx);
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + pages->page_size;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
		return;

page_alloc_default:
		/* Fallback: allocate each page separately (dma_pages still
		 * points at the first descriptor here)
		 */
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				qdf_mem_alloc_consistent(osdev, osdev->dev,
							 pages->page_size,
						&dma_pages->page_p_addr);
			if (!dma_pages->page_v_addr_start) {
				qdf_print("dmaable page alloc fail pi %d",
					  page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + pages->page_size;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	/* page_idx is the index of the page that FAILED: free [0, page_idx) */
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			qdf_mem_free(pages->cacheable_pages[i]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			qdf_mem_free_consistent(
				osdev, osdev->dev, pages->page_size,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		qdf_mem_free(pages->dma_pages);
	}

out_fail:
	/* Leave the descriptor in a state the caller can test for failure */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
2075*5113495bSYour Name #endif
2076*5113495bSYour Name qdf_export_symbol(qdf_mem_multi_pages_alloc);
2077*5113495bSYour Name 
2078*5113495bSYour Name #ifndef ALLOC_CONTIGUOUS_MULTI_PAGE
/**
 * qdf_mem_multi_pages_free() - release pages obtained from
 * qdf_mem_multi_pages_alloc() (per-page variant)
 * @osdev: OS device handle used for the original allocations
 * @pages: page descriptor to tear down; zeroed on return
 * @memctxt: DMA context matching the one used at allocation time
 * @cacheable: must match the flag passed to the allocator
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable)
{
	unsigned int idx;
	struct qdf_mem_dma_page_t *dpage;

	if (!pages->page_size)
		pages->page_size = qdf_page_size;

	if (cacheable) {
		/* Release every kernel page, then the tracking array */
		for (idx = 0; idx < pages->num_pages; idx++)
			qdf_mem_free(pages->cacheable_pages[idx]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		/* Return each coherent page, then the descriptor array */
		for (idx = 0, dpage = pages->dma_pages;
		     idx < pages->num_pages; idx++, dpage++)
			qdf_mem_free_consistent(osdev, osdev->dev,
						pages->page_size,
						dpage->page_v_addr_start,
						dpage->page_p_addr, memctxt);
		qdf_mem_free(pages->dma_pages);
	}

	/* Reset bookkeeping so stale pointers cannot be reused */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
2110*5113495bSYour Name #else
/**
 * qdf_mem_multi_pages_free() - release pages obtained from
 * qdf_mem_multi_pages_alloc() (ALLOC_CONTIGUOUS_MULTI_PAGE variant)
 * @osdev: OS device handle used for the original allocations
 * @pages: page descriptor to tear down; zeroed on return
 * @memctxt: DMA context matching the one used at allocation time
 * @cacheable: must match the flag passed to the allocator
 *
 * When contiguous_dma_pages is set, all pages were carved from one
 * coherent buffer and are returned with a single free of total_size.
 */
void qdf_mem_multi_pages_free(qdf_device_t osdev,
			      struct qdf_mem_multi_page_t *pages,
			      qdf_dma_context_t memctxt, bool cacheable)
{
	unsigned int idx;
	struct qdf_mem_dma_page_t *dpage;
	qdf_size_t total_size;

	if (!pages->page_size)
		pages->page_size = qdf_page_size;

	if (cacheable) {
		for (idx = 0; idx < pages->num_pages; idx++)
			qdf_mem_free(pages->cacheable_pages[idx]);
		qdf_mem_free(pages->cacheable_pages);
	} else {
		dpage = pages->dma_pages;
		total_size = pages->page_size * pages->num_pages;

		if (pages->contiguous_dma_pages) {
			/* One coherent buffer backs all pages; the first
			 * descriptor holds its base addresses
			 */
			qdf_mem_free_consistent(osdev, osdev->dev, total_size,
						dpage->page_v_addr_start,
						dpage->page_p_addr, memctxt);
		} else {
			for (idx = 0; idx < pages->num_pages; idx++, dpage++)
				qdf_mem_free_consistent(
					osdev, osdev->dev, pages->page_size,
					dpage->page_v_addr_start,
					dpage->page_p_addr, memctxt);
		}
		qdf_mem_free(pages->dma_pages);
	}

	/* Reset bookkeeping so stale pointers cannot be reused */
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
2151*5113495bSYour Name #endif
2152*5113495bSYour Name qdf_export_symbol(qdf_mem_multi_pages_free);
2153*5113495bSYour Name #endif
2154*5113495bSYour Name 
qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t * pages,bool cacheable)2155*5113495bSYour Name void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2156*5113495bSYour Name 			      bool cacheable)
2157*5113495bSYour Name {
2158*5113495bSYour Name 	unsigned int page_idx;
2159*5113495bSYour Name 	struct qdf_mem_dma_page_t *dma_pages;
2160*5113495bSYour Name 
2161*5113495bSYour Name 	if (!pages->page_size)
2162*5113495bSYour Name 		pages->page_size = qdf_page_size;
2163*5113495bSYour Name 
2164*5113495bSYour Name 	if (cacheable) {
2165*5113495bSYour Name 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2166*5113495bSYour Name 			qdf_mem_zero(pages->cacheable_pages[page_idx],
2167*5113495bSYour Name 				     pages->page_size);
2168*5113495bSYour Name 	} else {
2169*5113495bSYour Name 		dma_pages = pages->dma_pages;
2170*5113495bSYour Name 		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2171*5113495bSYour Name 			qdf_mem_zero(dma_pages->page_v_addr_start,
2172*5113495bSYour Name 				     pages->page_size);
2173*5113495bSYour Name 			dma_pages++;
2174*5113495bSYour Name 		}
2175*5113495bSYour Name 	}
2176*5113495bSYour Name }
2177*5113495bSYour Name 
2178*5113495bSYour Name qdf_export_symbol(qdf_mem_multi_pages_zero);
2179*5113495bSYour Name 
/**
 * __qdf_mem_free() - free memory obtained from __qdf_mem_malloc()
 * @ptr: buffer to release; NULL is silently ignored
 *
 * Buffers that originated in the prealloc pool are returned to the pool;
 * everything else is freed to the slab with accounting updated.
 */
void __qdf_mem_free(void *ptr)
{
	if (!ptr)
		return;

	/* Pool buffers go back to the pool, not the slab */
	if (qdf_might_be_prealloc(ptr) && qdf_mem_prealloc_put(ptr))
		return;

	qdf_mem_kmalloc_dec(ksize(ptr));
	kfree(ptr);
}
2194*5113495bSYour Name 
2195*5113495bSYour Name qdf_export_symbol(__qdf_mem_free);
2196*5113495bSYour Name 
__qdf_mem_malloc(size_t size,const char * func,uint32_t line)2197*5113495bSYour Name void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2198*5113495bSYour Name {
2199*5113495bSYour Name 	void *ptr;
2200*5113495bSYour Name 
2201*5113495bSYour Name 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2202*5113495bSYour Name 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2203*5113495bSYour Name 			     line);
2204*5113495bSYour Name 		return NULL;
2205*5113495bSYour Name 	}
2206*5113495bSYour Name 
2207*5113495bSYour Name 	ptr = qdf_mem_prealloc_get(size);
2208*5113495bSYour Name 	if (ptr)
2209*5113495bSYour Name 		return ptr;
2210*5113495bSYour Name 
2211*5113495bSYour Name 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2212*5113495bSYour Name 	if (!ptr)
2213*5113495bSYour Name 		return NULL;
2214*5113495bSYour Name 
2215*5113495bSYour Name 	qdf_mem_kmalloc_inc(ksize(ptr));
2216*5113495bSYour Name 
2217*5113495bSYour Name 	return ptr;
2218*5113495bSYour Name }
2219*5113495bSYour Name 
2220*5113495bSYour Name qdf_export_symbol(__qdf_mem_malloc);
2221*5113495bSYour Name 
2222*5113495bSYour Name #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * __qdf_untracked_mem_free() - free memory obtained from
 * __qdf_untracked_mem_malloc(), bypassing QDF accounting
 * @ptr: buffer to release; NULL is accepted
 */
void __qdf_untracked_mem_free(void *ptr)
{
	/* kfree() is documented as a no-op for NULL, so no guard is needed */
	kfree(ptr);
}
2230*5113495bSYour Name 
__qdf_untracked_mem_malloc(size_t size,const char * func,uint32_t line)2231*5113495bSYour Name void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2232*5113495bSYour Name {
2233*5113495bSYour Name 	void *ptr;
2234*5113495bSYour Name 
2235*5113495bSYour Name 	if (!size || size > QDF_MEM_MAX_MALLOC) {
2236*5113495bSYour Name 		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2237*5113495bSYour Name 			     line);
2238*5113495bSYour Name 		return NULL;
2239*5113495bSYour Name 	}
2240*5113495bSYour Name 
2241*5113495bSYour Name 	ptr = kzalloc(size, qdf_mem_malloc_flags());
2242*5113495bSYour Name 	if (!ptr)
2243*5113495bSYour Name 		return NULL;
2244*5113495bSYour Name 
2245*5113495bSYour Name 	return ptr;
2246*5113495bSYour Name }
2247*5113495bSYour Name #endif
2248*5113495bSYour Name 
/**
 * qdf_aligned_malloc_fl() - allocate a buffer whose PHYSICAL address is
 * aligned to @align
 * @size: in/out - requested size; updated to the padded size if a second,
 *        padded allocation was needed
 * @vaddr_unaligned: out - base virtual address to pass to qdf_mem_free()
 * @paddr_unaligned: out - physical address of the unaligned base
 * @paddr_aligned: out - physical address rounded up to @align
 * @align: required alignment in bytes
 *         (assumes a power of two - qdf_align/mask math relies on it;
 *          NOTE(review): not validated here, confirm with callers)
 * @func: caller function name, for diagnostics only
 * @line: caller line number, for diagnostics only
 *
 * Return: virtual address corresponding to *paddr_aligned, or NULL
 */
void *qdf_aligned_malloc_fl(uint32_t *size,
			    void **vaddr_unaligned,
				qdf_dma_addr_t *paddr_unaligned,
				qdf_dma_addr_t *paddr_aligned,
				uint32_t align,
			    const char *func, uint32_t line)
{
	void *vaddr_aligned;
	uint32_t align_alloc_size;

	/* First attempt: exact size, hoping the OS hands back an already
	 * aligned block
	 */
	*vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
			line);
	if (!*vaddr_unaligned) {
		qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
		return NULL;
	}

	*paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);

	/* Re-allocate additional bytes to align base address only if
	 * above allocation returns unaligned address. Reason for
	 * trying exact size allocation above is, OS tries to allocate
	 * blocks of size power-of-2 pages and then free extra pages.
	 * e.g., of a ring size of 1MB, the allocation below will
	 * request 1MB plus 7 bytes for alignment, which will cause a
	 * 2MB block allocation,and that is failing sometimes due to
	 * memory fragmentation.
	 */
	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
		align_alloc_size = *size + align - 1;

		qdf_mem_free(*vaddr_unaligned);
		*vaddr_unaligned = qdf_mem_malloc_fl(
				(qdf_size_t)align_alloc_size, func, line);
		if (!*vaddr_unaligned) {
			qdf_warn("Failed to alloc %uB @ %s:%d",
				 align_alloc_size, func, line);
			return NULL;
		}

		*paddr_unaligned = qdf_mem_virt_to_phys(
				*vaddr_unaligned);
		/* Report the padded size back to the caller */
		*size = align_alloc_size;
	}

	/* Round the physical address up; with align-1 padding the aligned
	 * region still fits inside the allocation
	 */
	*paddr_aligned = (qdf_dma_addr_t)qdf_align
		((unsigned long)(*paddr_unaligned), align);

	/* Apply the same offset to the virtual address */
	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
			((unsigned long)(*paddr_aligned) -
			 (unsigned long)(*paddr_unaligned)));

	return vaddr_aligned;
}
2303*5113495bSYour Name 
2304*5113495bSYour Name qdf_export_symbol(qdf_aligned_malloc_fl);
2305*5113495bSYour Name 
2306*5113495bSYour Name #if defined(DP_UMAC_HW_RESET_SUPPORT) || defined(WLAN_SUPPORT_PPEDS)
/**
 * qdf_tx_desc_pool_free_bufs() - invoke a release callback on the elements
 * of a multi-page descriptor pool
 * @ctxt: opaque context forwarded to @cb
 * @pages: multi-page pool holding the elements
 * @elem_size: size in bytes of one element
 * @elem_count: number of elements in the pool
 * @cacheable: selects cacheable_pages vs dma_pages addressing
 * @cb: callback invoked once per element
 * @elem_list: opaque list pointer forwarded to @cb
 *
 * Return: 0 on success, -ENOMEM if a page pointer is NULL
 */
int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
			       uint32_t elem_size, uint32_t elem_count,
			       uint8_t cacheable, qdf_mem_release_cb cb,
			       void *elem_list)
{
	uint16_t i, i_int;
	void *page_info;
	void *elem;
	uint32_t num_elem = 0;

	for (i = 0; i < pages->num_pages; i++) {
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;

		if (!page_info)
			return -ENOMEM;

		elem = page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			cb(ctxt, elem, elem_list);
			elem = ((char *)elem + elem_size);
			num_elem++;

			/* Number of desc pool elements reached: return
			 * instead of 'break' - a break only left the inner
			 * loop, the counter moved past the threshold, and
			 * every remaining page was still walked.
			 */
			if (num_elem == (elem_count - 1))
				return 0;
		}
	}

	return 0;
}
2340*5113495bSYour Name 
2341*5113495bSYour Name qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
2342*5113495bSYour Name #endif
2343*5113495bSYour Name 
/**
 * qdf_mem_multi_page_link() - thread the elements of a multi-page pool into
 * a singly-linked free list (each element's first word points at the next)
 * @osdev: OS device handle (unused in the linking itself)
 * @pages: multi-page pool whose elements are linked
 * @elem_size: size in bytes of one element
 * @elem_count: total number of elements to link
 * @cacheable: selects cacheable_pages vs dma_pages addressing
 *
 * The last element on each page is pointed at the first element of the
 * next page; the final element's link is set to NULL.
 *
 * Return: 0 on success, -ENOMEM if a page pointer is NULL
 */
int qdf_mem_multi_page_link(qdf_device_t osdev,
			    struct qdf_mem_multi_page_t *pages,
			    uint32_t elem_size, uint32_t elem_count,
			    uint8_t cacheable)
{
	uint16_t i, i_int;
	void *page_info;
	void **c_elem = NULL;
	uint32_t num_link = 0;

	for (i = 0; i < pages->num_pages; i++) {
		if (cacheable)
			page_info = pages->cacheable_pages[i];
		else
			page_info = pages->dma_pages[i].page_v_addr_start;

		if (!page_info)
			return -ENOMEM;

		/* Each element's first machine word holds the next pointer */
		c_elem = (void **)page_info;
		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
			if (i_int == (pages->num_element_per_page - 1)) {
				/* Last slot of this page: cross-link to the
				 * start of the next page (if any)
				 */
				if ((i + 1) == pages->num_pages)
					break;
				if (cacheable)
					*c_elem = pages->
						cacheable_pages[i + 1];
				else
					*c_elem = pages->
						dma_pages[i + 1].
							page_v_addr_start;
				num_link++;
				break;
			} else {
				/* Within a page, the next element is simply
				 * elem_size bytes further on
				 */
				*c_elem =
					(void *)(((char *)c_elem) + elem_size);
			}
			num_link++;
			c_elem = (void **)*c_elem;

			/* Last link established exit
			 * NOTE(review): this break only exits the inner loop
			 * and the equality cannot re-trigger afterwards, so
			 * later pages are still linked - presumably benign
			 * when elem_count matches the pool size; confirm.
			 */
			if (num_link == (elem_count - 1))
				break;
		}
	}

	/* Terminate the free list */
	if (c_elem)
		*c_elem = NULL;

	return 0;
}
2395*5113495bSYour Name qdf_export_symbol(qdf_mem_multi_page_link);
2396*5113495bSYour Name 
/**
 * qdf_mem_copy() - copy @num_bytes from @src_addr to @dst_addr
 * @dst_addr: destination buffer (must not overlap source)
 * @src_addr: source buffer
 * @num_bytes: byte count; 0 is a no-op and tolerates NULL pointers
 *
 * NULL with a non-zero count triggers QDF_BUG and returns without copying.
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	/* special case where dst_addr or src_addr can be NULL */
	if (num_bytes == 0)
		return;

	QDF_BUG(dst_addr);
	QDF_BUG(src_addr);
	if (!dst_addr || !src_addr)
		return;

	memcpy(dst_addr, src_addr, num_bytes);
}
2410*5113495bSYour Name qdf_export_symbol(qdf_mem_copy);
2411*5113495bSYour Name 
/**
 * qdf_mem_shared_mem_alloc() - allocate a DMA-coherent shared-memory region
 * and build its scatter-gather table
 * @osdev: OS device handle
 * @size: requested size in bytes (may be rounded up by the DMA layer)
 *
 * On any intermediate failure everything acquired so far is released, so
 * the function either returns a fully-initialized descriptor or NULL.
 *
 * Return: shared-memory descriptor, or NULL on failure
 */
qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
{
	qdf_shared_mem_t *shared_mem;
	qdf_dma_addr_t dma_addr, paddr;
	int ret;

	shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
	if (!shared_mem)
		return NULL;

	shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
				size, qdf_mem_get_dma_addr_ptr(osdev,
						&shared_mem->mem_info));
	if (!shared_mem->vaddr) {
		qdf_err("Unable to allocate DMA memory for shared resource");
		qdf_mem_free(shared_mem);
		return NULL;
	}

	/* Record the size, then re-read it: the DMA layer may round it up */
	qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
	size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);

	qdf_mem_zero(shared_mem->vaddr, size);
	dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
	paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);

	qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
	ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
				      shared_mem->vaddr, dma_addr, size);
	if (ret) {
		/* Unwind in reverse order: coherent buffer, then descriptor */
		qdf_err("Unable to get DMA sgtable");
		qdf_mem_free_consistent(osdev, osdev->dev,
					shared_mem->mem_info.size,
					shared_mem->vaddr,
					dma_addr,
					qdf_get_dma_mem_context(shared_mem,
								memctx));
		qdf_mem_free(shared_mem);
		return NULL;
	}

	qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);

	return shared_mem;
}
2457*5113495bSYour Name 
2458*5113495bSYour Name qdf_export_symbol(qdf_mem_shared_mem_alloc);
2459*5113495bSYour Name 
qdf_mem_copy_toio(void * dst_addr,const void * src_addr,uint32_t num_bytes)2460*5113495bSYour Name void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2461*5113495bSYour Name {
2462*5113495bSYour Name 	if (0 == num_bytes) {
2463*5113495bSYour Name 		/* special case where dst_addr or src_addr can be NULL */
2464*5113495bSYour Name 		return;
2465*5113495bSYour Name 	}
2466*5113495bSYour Name 
2467*5113495bSYour Name 	if ((!dst_addr) || (!src_addr)) {
2468*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2469*5113495bSYour Name 			  "%s called with NULL parameter, source:%pK destination:%pK",
2470*5113495bSYour Name 			  __func__, src_addr, dst_addr);
2471*5113495bSYour Name 		QDF_ASSERT(0);
2472*5113495bSYour Name 		return;
2473*5113495bSYour Name 	}
2474*5113495bSYour Name 	memcpy_toio(dst_addr, src_addr, num_bytes);
2475*5113495bSYour Name }
2476*5113495bSYour Name 
2477*5113495bSYour Name qdf_export_symbol(qdf_mem_copy_toio);
2478*5113495bSYour Name 
/**
 * qdf_mem_set_io() - fill memory-mapped I/O space with a byte value
 * @ptr: destination I/O address; NULL is logged and ignored
 * @num_bytes: number of bytes to set
 * @value: fill value (note: QDF order is size-then-value)
 */
void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
{
	if (ptr) {
		/* memset_io() takes value before size, unlike this wrapper */
		memset_io(ptr, value, num_bytes);
		return;
	}

	qdf_print("%s called with NULL parameter ptr", __func__);
}
2487*5113495bSYour Name 
2488*5113495bSYour Name qdf_export_symbol(qdf_mem_set_io);
2489*5113495bSYour Name 
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
{
	/* assert on NULL, but still fail safe when asserts are compiled out.
	 * QDF's historical argument order is (ptr, size, value); memset
	 * takes (ptr, value, size), hence the swap below.
	 */
	QDF_BUG(ptr);
	if (ptr)
		memset(ptr, value, num_bytes);
}
2498*5113495bSYour Name qdf_export_symbol(qdf_mem_set);
2499*5113495bSYour Name 
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
{
	/* zero-length moves are legal even with NULL addresses */
	if (num_bytes == 0)
		return;

	QDF_BUG(dst_addr);
	QDF_BUG(src_addr);

	/* memmove handles overlapping source/destination regions */
	if (dst_addr && src_addr)
		memmove(dst_addr, src_addr, num_bytes);
}
2513*5113495bSYour Name qdf_export_symbol(qdf_mem_move);
2514*5113495bSYour Name 
int qdf_mem_cmp(const void *left, const void *right, size_t size)
{
	int diff;

	/* NULL operands are a caller bug; memcmp on NULL is undefined */
	QDF_BUG(left);
	QDF_BUG(right);

	diff = memcmp(left, right, size);

	return diff;
}
2522*5113495bSYour Name qdf_export_symbol(qdf_mem_cmp);
2523*5113495bSYour Name 
2524*5113495bSYour Name #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2525*5113495bSYour Name /**
2526*5113495bSYour Name  * qdf_mem_dma_alloc() - allocates memory for dma
2527*5113495bSYour Name  * @osdev: OS device handle
2528*5113495bSYour Name  * @dev: Pointer to device handle
2529*5113495bSYour Name  * @size: Size to be allocated
2530*5113495bSYour Name  * @phy_addr: Physical address
2531*5113495bSYour Name  *
2532*5113495bSYour Name  * Return: pointer of allocated memory or null if memory alloc fails
2533*5113495bSYour Name  */
qdf_mem_dma_alloc(qdf_device_t osdev,void * dev,qdf_size_t size,qdf_dma_addr_t * phy_addr)2534*5113495bSYour Name static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2535*5113495bSYour Name 				      qdf_size_t size,
2536*5113495bSYour Name 				      qdf_dma_addr_t *phy_addr)
2537*5113495bSYour Name {
2538*5113495bSYour Name 	void *vaddr;
2539*5113495bSYour Name 
2540*5113495bSYour Name 	vaddr = qdf_mem_malloc(size);
2541*5113495bSYour Name 	*phy_addr = ((uintptr_t) vaddr);
2542*5113495bSYour Name 	/* using this type conversion to suppress "cast from pointer to integer
2543*5113495bSYour Name 	 * of different size" warning on some platforms
2544*5113495bSYour Name 	 */
2545*5113495bSYour Name 	BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2546*5113495bSYour Name 	return vaddr;
2547*5113495bSYour Name }
2548*5113495bSYour Name 
2549*5113495bSYour Name #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2550*5113495bSYour Name 	!defined(QCA_WIFI_QCN9000)
2551*5113495bSYour Name 
2552*5113495bSYour Name #define QCA8074_RAM_BASE 0x50000000
2553*5113495bSYour Name #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
/**
 * qdf_mem_dma_alloc() - allocate DMA memory usable by x86 emulation targets
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @phy_addr: Physical address
 *
 * Only physical addresses at or above QCA8074_RAM_BASE are acceptable on
 * this platform; lower allocations are released and the request retried.
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
			qdf_dma_addr_t *phy_addr)
{
	void *vaddr = NULL;
	int attempt;

	*phy_addr = 0;

	for (attempt = 0; attempt < QDF_MEM_ALLOC_X86_MAX_RETRIES; attempt++) {
		vaddr = dma_alloc_coherent(dev, size, phy_addr,
					   qdf_mem_malloc_flags());

		if (!vaddr) {
			qdf_err("%s failed , size: %zu!", __func__, size);
			return NULL;
		}

		if (*phy_addr >= QCA8074_RAM_BASE)
			return vaddr;

		/* unusable low address: give it back and try again */
		dma_free_coherent(dev, size, vaddr, *phy_addr);
	}

	return NULL;
}
2579*5113495bSYour Name #elif defined(QCA_DMA_PADDR_CHECK)
2580*5113495bSYour Name #ifdef CONFIG_LEAK_DETECTION
2581*5113495bSYour Name #define MAX_DEBUG_DOMAIN_COUNT QDF_DEBUG_DOMAIN_COUNT
2582*5113495bSYour Name #define debug_domain_get() qdf_debug_domain_get()
2583*5113495bSYour Name #else
2584*5113495bSYour Name #define MAX_DEBUG_DOMAIN_COUNT 1
2585*5113495bSYour Name #define debug_domain_get() DEFAULT_DEBUG_DOMAIN_INIT
2586*5113495bSYour Name #endif
/**
 * struct qdf_dma_buf_entry - DMA invalid buffer list entry
 * @node: QDF list node member
 * @size: DMA buffer size
 * @phy_addr: DMA buffer physical address
 * @vaddr: DMA buffer virtual address. If the DMA buffer is at least as
 *         large as this entry, the entry is stored inside the buffer
 *         itself (the buffer's start doubles as the entry) and @vaddr is
 *         set to NULL, avoiding a separate allocation. If the buffer is
 *         smaller than the entry, the entry is allocated separately and
 *         @vaddr holds the invalid DMA buffer's virtual address.
 */
struct qdf_dma_buf_entry {
	qdf_list_node_t node;
	qdf_size_t size;
	qdf_dma_addr_t phy_addr;
	void *vaddr;
};
2605*5113495bSYour Name 
2606*5113495bSYour Name #define DMA_PHY_ADDR_RESERVED 0x2000
2607*5113495bSYour Name #define QDF_DMA_MEM_ALLOC_MAX_RETRIES 10
2608*5113495bSYour Name #define QDF_DMA_INVALID_BUF_LIST_SIZE 128
2609*5113495bSYour Name static qdf_list_t qdf_invalid_buf_list[MAX_DEBUG_DOMAIN_COUNT];
2610*5113495bSYour Name static bool qdf_invalid_buf_list_init[MAX_DEBUG_DOMAIN_COUNT];
2611*5113495bSYour Name static qdf_spinlock_t qdf_invalid_buf_list_lock;
2612*5113495bSYour Name 
/**
 * qdf_mem_dma_alloc() - allocate DMA memory with a usable physical address
 * @osdev: OS device handle
 * @dev: pointer to device handle used for the coherent allocation
 * @size: number of bytes to allocate
 * @paddr: out parameter for the buffer's physical (DMA) address
 *
 * Buffers whose physical address falls at or below DMA_PHY_ADDR_RESERVED
 * are considered unusable. Instead of freeing such a buffer (which would
 * likely hand the same range straight back), it is quarantined on the
 * per-debug-domain qdf_invalid_buf_list and the allocation is retried, up
 * to QDF_DMA_MEM_ALLOC_MAX_RETRIES times. Quarantined buffers are not
 * released here; they are presumably reclaimed elsewhere when the list is
 * drained -- TODO confirm against the list teardown path.
 *
 * Return: virtual address of the allocated buffer, or NULL on failure
 */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	void *vaddr;
	uint32_t retry;
	QDF_STATUS status;
	bool is_separate;
	qdf_list_t *cur_buf_list;
	struct qdf_dma_buf_entry *entry;
	uint8_t current_domain;

	for (retry = 0; retry < QDF_DMA_MEM_ALLOC_MAX_RETRIES; retry++) {
		vaddr = dma_alloc_coherent(dev, size, paddr,
					   qdf_mem_malloc_flags());
		if (!vaddr)
			return NULL;

		/* common case: address is above the reserved range, done */
		if (qdf_likely(*paddr > DMA_PHY_ADDR_RESERVED))
			return vaddr;

		current_domain = debug_domain_get();

		/* if qdf_invalid_buf_list not init, so we can't store memory
		 * info and can't hold it. let's free the invalid memory and
		 * try to get memory with phy address greater than
		 * DMA_PHY_ADDR_RESERVED
		 */
		if (current_domain >= MAX_DEBUG_DOMAIN_COUNT ||
		    !qdf_invalid_buf_list_init[current_domain]) {
			qdf_debug("physical address below 0x%x, re-alloc",
				  DMA_PHY_ADDR_RESERVED);
			dma_free_coherent(dev, size, vaddr, *paddr);
			continue;
		}

		cur_buf_list = &qdf_invalid_buf_list[current_domain];
		if (size >= sizeof(*entry)) {
			/* buffer is big enough to host its own bookkeeping
			 * entry; vaddr == NULL marks the in-place form
			 */
			entry = vaddr;
			entry->vaddr = NULL;
		} else {
			/* buffer too small: track it via a separate entry */
			entry = qdf_mem_malloc(sizeof(*entry));
			if (!entry) {
				dma_free_coherent(dev, size, vaddr, *paddr);
				qdf_err("qdf_mem_malloc entry failed!");
				continue;
			}
			entry->vaddr = vaddr;
		}

		entry->phy_addr = *paddr;
		entry->size = size;
		qdf_spin_lock_irqsave(&qdf_invalid_buf_list_lock);
		status = qdf_list_insert_back(cur_buf_list,
					      &entry->node);
		qdf_spin_unlock_irqrestore(&qdf_invalid_buf_list_lock);
		if (QDF_IS_STATUS_ERROR(status)) {
			/* could not quarantine: free both the DMA buffer and,
			 * when it was separately allocated, the entry
			 */
			qdf_err("insert buf entry fail, status %d", status);
			is_separate = !entry->vaddr ? false : true;
			dma_free_coherent(dev, size, vaddr, *paddr);
			if (is_separate)
				qdf_mem_free(entry);
		}
	}

	return NULL;
}
2679*5113495bSYour Name #else
/* default platform: no quirks, delegate straight to the OS allocator */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
				      qdf_size_t size, qdf_dma_addr_t *paddr)
{
	void *vaddr = dma_alloc_coherent(dev, size, paddr,
					 qdf_mem_malloc_flags());

	return vaddr;
}
2685*5113495bSYour Name #endif
2686*5113495bSYour Name 
2687*5113495bSYour Name #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
/* counterpart of the heap-backed qdf_mem_dma_alloc() variant above:
 * the "DMA" buffer came from qdf_mem_malloc(), so plain free suffices;
 * dev/size/paddr are unused here
 */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free(vaddr);
}
2693*5113495bSYour Name #else
2694*5113495bSYour Name 
/* release a coherent DMA buffer obtained from qdf_mem_dma_alloc() */
static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
{
	dma_free_coherent(dev, size, vaddr, paddr);
}
2700*5113495bSYour Name #endif
2701*5113495bSYour Name 
2702*5113495bSYour Name #ifdef MEMORY_DEBUG
/* debug-tracked coherent allocation: over-allocates by
 * QDF_DMA_MEM_DEBUG_SIZE, appends a tracking header as a trailer, and
 * records the allocation on the current debug domain's DMA list
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
				     qdf_size_t size, qdf_dma_addr_t *paddr,
				     const char *func, uint32_t line,
				     void *caller)
{
	QDF_STATUS status;
	enum qdf_debug_domain current_domain = qdf_debug_domain_get();
	qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
	struct qdf_mem_header *header;
	void *vaddr;

	/* debug tracking disabled at boot: plain allocation path */
	if (is_initial_mem_debug_disabled)
		return __qdf_mem_alloc_consistent(osdev, dev,
						  size, paddr,
						  func, line);

	if (!size || size > QDF_MEM_MAX_MALLOC) {
		qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
		return NULL;
	}

	/* extra bytes hold the debug header placed after the payload */
	vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
				   paddr);

	if (!vaddr) {
		qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
		return NULL;
	}

	header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers, this function will init
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	qdf_mem_header_init(header, size, func, line, caller);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	status = qdf_list_insert_front(mem_list, &header->node);
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
	if (QDF_IS_STATUS_ERROR(status))
		qdf_err("Failed to insert memory header; status %d", status);

	/* account the requested size only, not the debug overhead */
	qdf_mem_dma_inc(size);

	return vaddr;
}
2750*5113495bSYour Name qdf_export_symbol(qdf_mem_alloc_consistent_debug);
2751*5113495bSYour Name 
/* debug-tracked coherent free: validates the trailer header written by
 * qdf_mem_alloc_consistent_debug(), unlinks it from the tracking list,
 * then releases payload plus debug trailer
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
				   qdf_size_t size, void *vaddr,
				   qdf_dma_addr_t paddr,
				   qdf_dma_context_t memctx,
				   const char *func, uint32_t line)
{
	enum qdf_debug_domain domain = qdf_debug_domain_get();
	struct qdf_mem_header *header;
	enum qdf_mem_validation_bitmap error_bitmap;

	if (is_initial_mem_debug_disabled) {
		__qdf_mem_free_consistent(
					  osdev, dev,
					  size, vaddr,
					  paddr, memctx);
		return;
	}

	/* freeing a null pointer is valid */
	if (qdf_unlikely(!vaddr))
		return;

	qdf_talloc_assert_no_children_fl(vaddr, func, line);

	qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers, this function will retrieve
	 * the header structure at the tail
	 * Prefix the header into DMA buffer causes SMMU faults, so
	 * do not prefix header into the DMA buffers
	 */
	header = qdf_mem_dma_get_header(vaddr, size);
	error_bitmap = qdf_mem_header_validate(header, domain);
	if (!error_bitmap) {
		/* mark freed inside the lock so double-free is detectable */
		header->freed = true;
		qdf_list_remove_node(qdf_mem_dma_list(header->domain),
				     &header->node);
	}
	qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);

	qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);

	/* NOTE(review): header->size is read even when error_bitmap flagged
	 * the header invalid; presumably the assert above halts first on
	 * such corruption -- confirm behavior when asserts are compiled out
	 */
	qdf_mem_dma_dec(header->size);
	qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
}
2796*5113495bSYour Name qdf_export_symbol(qdf_mem_free_consistent_debug);
2797*5113495bSYour Name #endif /* MEMORY_DEBUG */
2798*5113495bSYour Name 
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, void *vaddr,
			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
{
	/* return the buffer to the OS, then drop it from the DMA byte count */
	qdf_mem_dma_free(dev, size, vaddr, paddr);
	qdf_mem_dma_dec(size);
}
2806*5113495bSYour Name 
2807*5113495bSYour Name qdf_export_symbol(__qdf_mem_free_consistent);
2808*5113495bSYour Name 
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
				 qdf_size_t size, qdf_dma_addr_t *paddr,
				 const char *func, uint32_t line)
{
	void *vaddr = NULL;

	/* reject zero-length and oversized requests up front */
	if (size && size <= QDF_MEM_MAX_MALLOC) {
		vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);

		/* only successful allocations are accounted */
		if (vaddr)
			qdf_mem_dma_inc(size);
	} else {
		qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
			     size, func, line);
	}

	return vaddr;
}
2828*5113495bSYour Name 
2829*5113495bSYour Name qdf_export_symbol(__qdf_mem_alloc_consistent);
2830*5113495bSYour Name 
/* allocate coherent memory whose physical address is aligned to @align
 * (the (align - 1) mask math assumes @align is a power of two -- TODO
 * confirm all callers guarantee this). On success *size may be updated
 * to the padded allocation size; caller frees *vaddr_unaligned with the
 * updated *size
 */
void *qdf_aligned_mem_alloc_consistent_fl(
	qdf_device_t osdev, uint32_t *size,
	void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
	qdf_dma_addr_t *paddr_aligned, uint32_t align,
	const char *func, uint32_t line)
{
	void *vaddr_aligned;
	uint32_t align_alloc_size;

	/* first try the exact size, hoping it comes back aligned */
	*vaddr_unaligned = qdf_mem_alloc_consistent(
			osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
	if (!*vaddr_unaligned) {
		qdf_warn("Failed to alloc %uB @ %s:%d",
			 *size, func, line);
		return NULL;
	}

	/* Re-allocate additional bytes to align base address only if
	 * above allocation returns unaligned address. Reason for
	 * trying exact size allocation above is, OS tries to allocate
	 * blocks of size power-of-2 pages and then free extra pages.
	 * e.g., of a ring size of 1MB, the allocation below will
	 * request 1MB plus 7 bytes for alignment, which will cause a
	 * 2MB block allocation,and that is failing sometimes due to
	 * memory fragmentation.
	 */
	if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
		align_alloc_size = *size + align - 1;

		qdf_mem_free_consistent(osdev, osdev->dev, *size,
					*vaddr_unaligned,
					*paddr_unaligned, 0);

		*vaddr_unaligned = qdf_mem_alloc_consistent(
				osdev, osdev->dev, align_alloc_size,
				paddr_unaligned);
		if (!*vaddr_unaligned) {
			qdf_warn("Failed to alloc %uB @ %s:%d",
				 align_alloc_size, func, line);
			return NULL;
		}

		*size = align_alloc_size;
	}

	/* round the physical address up, then shift the virtual address
	 * by the same offset so both refer to the same byte
	 */
	*paddr_aligned = (qdf_dma_addr_t)qdf_align(
			(unsigned long)(*paddr_unaligned), align);

	vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
				 ((unsigned long)(*paddr_aligned) -
				  (unsigned long)(*paddr_unaligned)));

	return vaddr_aligned;
}
2885*5113495bSYour Name qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
2886*5113495bSYour Name 
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
					qdf_dma_addr_t bus_addr,
					qdf_size_t size,
					enum dma_data_direction direction)
{
	/* thin wrapper over the kernel streaming-DMA sync API */
	dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
}
2894*5113495bSYour Name qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2895*5113495bSYour Name 
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
				     qdf_dma_addr_t bus_addr,
				     qdf_size_t size,
				     enum dma_data_direction direction)
{
	/* thin wrapper over the kernel streaming-DMA sync API */
	dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
}
2903*5113495bSYour Name qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
2904*5113495bSYour Name 
/* bring up the QDF memory subsystem: debug trackers first, then the
 * debugfs views built on top of them; qdf_mem_exit() tears down in
 * reverse order
 */
void qdf_mem_init(void)
{
	qdf_mem_debug_init();
	qdf_net_buf_debug_init();
	qdf_frag_debug_init();
	qdf_mem_debugfs_init();
	qdf_mem_debug_debugfs_init();
}
2913*5113495bSYour Name qdf_export_symbol(qdf_mem_init);
2914*5113495bSYour Name 
/* tear down the QDF memory subsystem in the exact reverse order of
 * qdf_mem_init(): debugfs views first, then the debug trackers
 */
void qdf_mem_exit(void)
{
	qdf_mem_debug_debugfs_exit();
	qdf_mem_debugfs_exit();
	qdf_frag_debug_exit();
	qdf_net_buf_debug_exit();
	qdf_mem_debug_exit();
}
2923*5113495bSYour Name qdf_export_symbol(qdf_mem_exit);
2924*5113495bSYour Name 
qdf_ether_addr_copy(void * dst_addr,const void * src_addr)2925*5113495bSYour Name void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2926*5113495bSYour Name {
2927*5113495bSYour Name 	if ((!dst_addr) || (!src_addr)) {
2928*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2929*5113495bSYour Name 			  "%s called with NULL parameter, source:%pK destination:%pK",
2930*5113495bSYour Name 			  __func__, src_addr, dst_addr);
2931*5113495bSYour Name 		QDF_ASSERT(0);
2932*5113495bSYour Name 		return;
2933*5113495bSYour Name 	}
2934*5113495bSYour Name 	ether_addr_copy(dst_addr, src_addr);
2935*5113495bSYour Name }
2936*5113495bSYour Name qdf_export_symbol(qdf_ether_addr_copy);
2937*5113495bSYour Name 
qdf_dma_mem_stats_read(void)2938*5113495bSYour Name int32_t qdf_dma_mem_stats_read(void)
2939*5113495bSYour Name {
2940*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.dma);
2941*5113495bSYour Name }
2942*5113495bSYour Name 
2943*5113495bSYour Name qdf_export_symbol(qdf_dma_mem_stats_read);
2944*5113495bSYour Name 
qdf_heap_mem_stats_read(void)2945*5113495bSYour Name int32_t qdf_heap_mem_stats_read(void)
2946*5113495bSYour Name {
2947*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2948*5113495bSYour Name }
2949*5113495bSYour Name 
2950*5113495bSYour Name qdf_export_symbol(qdf_heap_mem_stats_read);
2951*5113495bSYour Name 
qdf_skb_mem_stats_read(void)2952*5113495bSYour Name int32_t qdf_skb_mem_stats_read(void)
2953*5113495bSYour Name {
2954*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.skb);
2955*5113495bSYour Name }
2956*5113495bSYour Name 
2957*5113495bSYour Name qdf_export_symbol(qdf_skb_mem_stats_read);
2958*5113495bSYour Name 
qdf_skb_total_mem_stats_read(void)2959*5113495bSYour Name int32_t qdf_skb_total_mem_stats_read(void)
2960*5113495bSYour Name {
2961*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.skb_total);
2962*5113495bSYour Name }
2963*5113495bSYour Name 
2964*5113495bSYour Name qdf_export_symbol(qdf_skb_total_mem_stats_read);
2965*5113495bSYour Name 
qdf_skb_max_mem_stats_read(void)2966*5113495bSYour Name int32_t qdf_skb_max_mem_stats_read(void)
2967*5113495bSYour Name {
2968*5113495bSYour Name 	return qdf_mem_stat.skb_mem_max;
2969*5113495bSYour Name }
2970*5113495bSYour Name 
2971*5113495bSYour Name qdf_export_symbol(qdf_skb_max_mem_stats_read);
2972*5113495bSYour Name 
qdf_dp_tx_skb_mem_stats_read(void)2973*5113495bSYour Name int32_t qdf_dp_tx_skb_mem_stats_read(void)
2974*5113495bSYour Name {
2975*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2976*5113495bSYour Name }
2977*5113495bSYour Name 
2978*5113495bSYour Name qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2979*5113495bSYour Name 
qdf_dp_rx_skb_mem_stats_read(void)2980*5113495bSYour Name int32_t qdf_dp_rx_skb_mem_stats_read(void)
2981*5113495bSYour Name {
2982*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2983*5113495bSYour Name }
2984*5113495bSYour Name 
2985*5113495bSYour Name qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2986*5113495bSYour Name 
qdf_mem_dp_tx_skb_cnt_read(void)2987*5113495bSYour Name int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2988*5113495bSYour Name {
2989*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2990*5113495bSYour Name }
2991*5113495bSYour Name 
2992*5113495bSYour Name qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2993*5113495bSYour Name 
qdf_mem_dp_tx_skb_max_cnt_read(void)2994*5113495bSYour Name int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2995*5113495bSYour Name {
2996*5113495bSYour Name 	return qdf_mem_stat.dp_tx_skb_count_max;
2997*5113495bSYour Name }
2998*5113495bSYour Name 
2999*5113495bSYour Name qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
3000*5113495bSYour Name 
qdf_mem_dp_rx_skb_cnt_read(void)3001*5113495bSYour Name int32_t qdf_mem_dp_rx_skb_cnt_read(void)
3002*5113495bSYour Name {
3003*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
3004*5113495bSYour Name }
3005*5113495bSYour Name 
3006*5113495bSYour Name qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
3007*5113495bSYour Name 
qdf_mem_dp_rx_skb_max_cnt_read(void)3008*5113495bSYour Name int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
3009*5113495bSYour Name {
3010*5113495bSYour Name 	return qdf_mem_stat.dp_rx_skb_count_max;
3011*5113495bSYour Name }
3012*5113495bSYour Name 
3013*5113495bSYour Name qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
3014*5113495bSYour Name 
qdf_dp_tx_skb_max_mem_stats_read(void)3015*5113495bSYour Name int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
3016*5113495bSYour Name {
3017*5113495bSYour Name 	return qdf_mem_stat.dp_tx_skb_mem_max;
3018*5113495bSYour Name }
3019*5113495bSYour Name 
3020*5113495bSYour Name qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
3021*5113495bSYour Name 
qdf_dp_rx_skb_max_mem_stats_read(void)3022*5113495bSYour Name int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
3023*5113495bSYour Name {
3024*5113495bSYour Name 	return qdf_mem_stat.dp_rx_skb_mem_max;
3025*5113495bSYour Name }
3026*5113495bSYour Name 
3027*5113495bSYour Name qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
3028*5113495bSYour Name 
qdf_mem_tx_desc_cnt_read(void)3029*5113495bSYour Name int32_t qdf_mem_tx_desc_cnt_read(void)
3030*5113495bSYour Name {
3031*5113495bSYour Name 	return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
3032*5113495bSYour Name }
3033*5113495bSYour Name 
3034*5113495bSYour Name qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
3035*5113495bSYour Name 
qdf_mem_tx_desc_max_read(void)3036*5113495bSYour Name int32_t qdf_mem_tx_desc_max_read(void)
3037*5113495bSYour Name {
3038*5113495bSYour Name 	return qdf_mem_stat.tx_descs_max;
3039*5113495bSYour Name }
3040*5113495bSYour Name 
3041*5113495bSYour Name qdf_export_symbol(qdf_mem_tx_desc_max_read);
3042*5113495bSYour Name 
/* record the caller-supplied TX descriptor stats; note the atomic counter
 * is copied by value (struct assignment), i.e. a snapshot, not a shared
 * reference
 */
void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
				int32_t tx_descs_max)
{
	qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
	qdf_mem_stat.tx_descs_max = tx_descs_max;
}
3049*5113495bSYour Name 
3050*5113495bSYour Name qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
3051*5113495bSYour Name 
/* reset every recorded high-watermark ("_max") statistic to zero; the
 * running counters themselves are left untouched
 */
void qdf_mem_stats_init(void)
{
	qdf_mem_stat.skb_mem_max = 0;
	qdf_mem_stat.dp_tx_skb_mem_max = 0;
	qdf_mem_stat.dp_rx_skb_mem_max = 0;
	qdf_mem_stat.dp_tx_skb_count_max = 0;
	qdf_mem_stat.dp_rx_skb_count_max = 0;
	qdf_mem_stat.tx_descs_max = 0;
}
3061*5113495bSYour Name 
3062*5113495bSYour Name qdf_export_symbol(qdf_mem_stats_init);
3063*5113495bSYour Name 
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
{
	/* reject zero-byte requests with the caller's location for triage */
	if (!size) {
		qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
		return NULL;
	}

	/* vzalloc returns zero-filled, virtually-contiguous memory */
	return vzalloc(size);
}
3077*5113495bSYour Name 
3078*5113495bSYour Name qdf_export_symbol(__qdf_mem_valloc);
3079*5113495bSYour Name 
/**
 * __qdf_mem_vfree() - free memory allocated via __qdf_mem_valloc()
 * @ptr: pointer returned by __qdf_mem_valloc(); may be NULL
 *
 * vfree(NULL) is a documented no-op in the kernel, so the previous
 * explicit NULL guard was redundant and has been removed.
 */
void __qdf_mem_vfree(void *ptr)
{
	vfree(ptr);
}

qdf_export_symbol(__qdf_mem_vfree);
3089*5113495bSYour Name 
3090*5113495bSYour Name #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
3091*5113495bSYour Name int
qdf_iommu_domain_get_attr(qdf_iommu_domain_t * domain,enum qdf_iommu_attr attr,void * data)3092*5113495bSYour Name qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
3093*5113495bSYour Name 			  enum qdf_iommu_attr attr, void *data)
3094*5113495bSYour Name {
3095*5113495bSYour Name 	return __qdf_iommu_domain_get_attr(domain, attr, data);
3096*5113495bSYour Name }
3097*5113495bSYour Name 
3098*5113495bSYour Name qdf_export_symbol(qdf_iommu_domain_get_attr);
3099*5113495bSYour Name #endif
3100*5113495bSYour Name 
3101*5113495bSYour Name #ifdef ENHANCED_OS_ABSTRACTION
/**
 * qdf_update_mem_map_table() - update a DMA memory map entry
 * @osdev: QDF device handle
 * @mem_info: memory map entry to update; must not be NULL
 * @dma_addr: DMA address of the buffer
 * @mem_size: size of the buffer in bytes
 *
 * Validates @mem_info and forwards to the OS-specific
 * __qdf_update_mem_map_table().
 */
void qdf_update_mem_map_table(qdf_device_t osdev,
			      qdf_mem_info_t *mem_info,
			      qdf_dma_addr_t dma_addr,
			      uint32_t mem_size)
{
	if (mem_info) {
		__qdf_update_mem_map_table(osdev, mem_info, dma_addr,
					   mem_size);
		return;
	}

	qdf_nofl_err("%s: NULL mem_info", __func__);
}

qdf_export_symbol(qdf_update_mem_map_table);
3116*5113495bSYour Name 
/**
 * qdf_mem_paddr_from_dmaaddr() - convert a DMA address to a physical address
 * @osdev: QDF device handle
 * @dma_addr: DMA address to convert
 *
 * Thin pass-through to the OS-specific __qdf_mem_paddr_from_dmaaddr().
 *
 * Return: result of __qdf_mem_paddr_from_dmaaddr()
 */
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
					  qdf_dma_addr_t dma_addr)
{
	return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}

qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
3124*5113495bSYour Name #endif
3125*5113495bSYour Name 
3126*5113495bSYour Name #ifdef QCA_KMEM_CACHE_SUPPORT
3127*5113495bSYour Name qdf_kmem_cache_t
__qdf_kmem_cache_create(const char * cache_name,qdf_size_t size)3128*5113495bSYour Name __qdf_kmem_cache_create(const char *cache_name,
3129*5113495bSYour Name 			qdf_size_t size)
3130*5113495bSYour Name {
3131*5113495bSYour Name 	struct kmem_cache *cache;
3132*5113495bSYour Name 
3133*5113495bSYour Name 	cache = kmem_cache_create(cache_name, size,
3134*5113495bSYour Name 				  0, 0, NULL);
3135*5113495bSYour Name 
3136*5113495bSYour Name 	if (!cache)
3137*5113495bSYour Name 		return NULL;
3138*5113495bSYour Name 
3139*5113495bSYour Name 	return cache;
3140*5113495bSYour Name }
3141*5113495bSYour Name qdf_export_symbol(__qdf_kmem_cache_create);
3142*5113495bSYour Name 
3143*5113495bSYour Name void
__qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)3144*5113495bSYour Name __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3145*5113495bSYour Name {
3146*5113495bSYour Name 	kmem_cache_destroy(cache);
3147*5113495bSYour Name }
3148*5113495bSYour Name 
3149*5113495bSYour Name qdf_export_symbol(__qdf_kmem_cache_destroy);
3150*5113495bSYour Name 
3151*5113495bSYour Name void*
__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)3152*5113495bSYour Name __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3153*5113495bSYour Name {
3154*5113495bSYour Name 	int flags = GFP_KERNEL;
3155*5113495bSYour Name 
3156*5113495bSYour Name 	if (in_interrupt() || irqs_disabled() || in_atomic())
3157*5113495bSYour Name 		flags = GFP_ATOMIC;
3158*5113495bSYour Name 
3159*5113495bSYour Name 	return kmem_cache_alloc(cache, flags);
3160*5113495bSYour Name }
3161*5113495bSYour Name 
3162*5113495bSYour Name qdf_export_symbol(__qdf_kmem_cache_alloc);
3163*5113495bSYour Name 
3164*5113495bSYour Name void
__qdf_kmem_cache_free(qdf_kmem_cache_t cache,void * node)3165*5113495bSYour Name __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3166*5113495bSYour Name 
3167*5113495bSYour Name {
3168*5113495bSYour Name 	kmem_cache_free(cache, node);
3169*5113495bSYour Name }
3170*5113495bSYour Name 
3171*5113495bSYour Name qdf_export_symbol(__qdf_kmem_cache_free);
3172*5113495bSYour Name #else
3173*5113495bSYour Name qdf_kmem_cache_t
__qdf_kmem_cache_create(const char * cache_name,qdf_size_t size)3174*5113495bSYour Name __qdf_kmem_cache_create(const char *cache_name,
3175*5113495bSYour Name 			qdf_size_t size)
3176*5113495bSYour Name {
3177*5113495bSYour Name 	return NULL;
3178*5113495bSYour Name }
3179*5113495bSYour Name 
3180*5113495bSYour Name void
__qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)3181*5113495bSYour Name __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3182*5113495bSYour Name {
3183*5113495bSYour Name }
3184*5113495bSYour Name 
3185*5113495bSYour Name void *
__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)3186*5113495bSYour Name __qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3187*5113495bSYour Name {
3188*5113495bSYour Name 	return NULL;
3189*5113495bSYour Name }
3190*5113495bSYour Name 
3191*5113495bSYour Name void
__qdf_kmem_cache_free(qdf_kmem_cache_t cache,void * node)3192*5113495bSYour Name __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3193*5113495bSYour Name {
3194*5113495bSYour Name }
3195*5113495bSYour Name #endif
3196*5113495bSYour Name 
3197*5113495bSYour Name #ifdef QCA_DMA_PADDR_CHECK
qdf_dma_invalid_buf_list_init(void)3198*5113495bSYour Name void qdf_dma_invalid_buf_list_init(void)
3199*5113495bSYour Name {
3200*5113495bSYour Name 	int i;
3201*5113495bSYour Name 
3202*5113495bSYour Name 	for (i = 0; i < MAX_DEBUG_DOMAIN_COUNT; i++) {
3203*5113495bSYour Name 		qdf_list_create(&qdf_invalid_buf_list[i],
3204*5113495bSYour Name 				QDF_DMA_INVALID_BUF_LIST_SIZE);
3205*5113495bSYour Name 		qdf_invalid_buf_list_init[i] = true;
3206*5113495bSYour Name 	}
3207*5113495bSYour Name 	qdf_spinlock_create(&qdf_invalid_buf_list_lock);
3208*5113495bSYour Name }
3209*5113495bSYour Name 
/**
 * qdf_dma_invalid_buf_free() - drain and free a domain's invalid DMA buffers
 * @dev: device the buffers were allocated against; NULL is a no-op
 * @domain: debug domain whose list is drained
 *
 * Pops each entry off the domain's deferred-free list under the lock,
 * then frees its coherent DMA buffer. When entry->vaddr is set, the
 * tracking entry was allocated separately from the DMA buffer and must
 * be freed on its own; otherwise the entry lives inside the DMA buffer
 * and is released together with it. Finally marks the domain's list as
 * uninitialized.
 */
void qdf_dma_invalid_buf_free(void *dev, uint8_t domain)
{
	qdf_list_t *cur_buf_list;
	struct qdf_dma_buf_entry *entry;
	QDF_STATUS status;

	if (!dev)
		return;

	if (domain >= MAX_DEBUG_DOMAIN_COUNT)
		return;

	if (!qdf_invalid_buf_list_init[domain])
		return;

	cur_buf_list = &qdf_invalid_buf_list[domain];
	do {
		qdf_spin_lock_irqsave(&qdf_invalid_buf_list_lock);
		status = qdf_list_remove_front(cur_buf_list,
					       (qdf_list_node_t **)&entry);
		qdf_spin_unlock_irqrestore(&qdf_invalid_buf_list_lock);

		if (status != QDF_STATUS_SUCCESS)
			break;

		if (entry->vaddr) {
			/* tracking entry allocated separately from buffer */
			dma_free_coherent(dev, entry->size, entry->vaddr,
					  entry->phy_addr);
			qdf_mem_free(entry);
		} else {
			/* entry is embedded at the start of the DMA buffer */
			dma_free_coherent(dev, entry->size, entry,
					  entry->phy_addr);
		}
	} while (!qdf_list_empty(cur_buf_list));
	qdf_invalid_buf_list_init[domain] = false;
}
3247*5113495bSYour Name 
qdf_dma_invalid_buf_list_deinit(void)3248*5113495bSYour Name void qdf_dma_invalid_buf_list_deinit(void)
3249*5113495bSYour Name {
3250*5113495bSYour Name 	int i;
3251*5113495bSYour Name 
3252*5113495bSYour Name 	for (i = 0; i < MAX_DEBUG_DOMAIN_COUNT; i++)
3253*5113495bSYour Name 		qdf_list_destroy(&qdf_invalid_buf_list[i]);
3254*5113495bSYour Name 
3255*5113495bSYour Name 	qdf_spinlock_destroy(&qdf_invalid_buf_list_lock);
3256*5113495bSYour Name }
3257*5113495bSYour Name #endif /* QCA_DMA_PADDR_CHECK */
3258