1 /*
2 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /**
21 * DOC: qdf_mem
22 * This file provides OS dependent memory management APIs
23 */
24
25 #include "qdf_debugfs.h"
26 #include "qdf_mem.h"
27 #include "qdf_nbuf.h"
28 #include "qdf_lock.h"
29 #include "qdf_mc_timer.h"
30 #include "qdf_module.h"
31 #include <qdf_trace.h>
32 #include "qdf_str.h"
33 #include "qdf_talloc.h"
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #include <linux/string.h>
37 #include <qdf_list.h>
38
39 #ifdef CNSS_MEM_PRE_ALLOC
40 #ifdef CONFIG_CNSS_OUT_OF_TREE
41 #include "cnss_prealloc.h"
42 #else
43 #include <net/cnss_prealloc.h>
44 #endif
45 #endif
46
47 #if defined(MEMORY_DEBUG) || defined(NBUF_MEMORY_DEBUG)
48 static bool mem_debug_disabled;
49 qdf_declare_param(mem_debug_disabled, bool);
50 #endif
51
52 #ifdef MEMORY_DEBUG
53 static bool is_initial_mem_debug_disabled;
54 #endif
55
56 /* Preprocessor Definitions and Constants */
57 #define QDF_MEM_MAX_MALLOC (4096 * 1024) /* 4 Mega Bytes */
58 #define QDF_MEM_WARN_THRESHOLD 300 /* ms */
59 #define QDF_DEBUG_STRING_SIZE 512
60
61 /**
62 * struct __qdf_mem_stat - qdf memory statistics
63 * @kmalloc: total kmalloc allocations
64 * @dma: total dma allocations
65 * @skb: total skb allocations
66 * @skb_total: total skb allocations in host driver
67 * @dp_tx_skb: total Tx skb allocations in datapath
68 * @dp_rx_skb: total Rx skb allocations in datapath
69 * @skb_mem_max: high watermark for skb allocations
70 * @dp_tx_skb_mem_max: high watermark for Tx DP skb allocations
71 * @dp_rx_skb_mem_max: high watermark for Rx DP skb allocations
72 * @dp_tx_skb_count: DP Tx buffer count
73 * @dp_tx_skb_count_max: High watermark for DP Tx buffer count
74 * @dp_rx_skb_count: DP Rx buffer count
75 * @dp_rx_skb_count_max: High watermark for DP Rx buffer count
76 * @tx_descs_outstanding: Current pending Tx descs count
77 * @tx_descs_max: High watermark for pending Tx descs count
78 */
79 static struct __qdf_mem_stat {
80 qdf_atomic_t kmalloc;
81 qdf_atomic_t dma;
82 qdf_atomic_t skb;
83 qdf_atomic_t skb_total;
84 qdf_atomic_t dp_tx_skb;
85 qdf_atomic_t dp_rx_skb;
86 int32_t skb_mem_max;
87 int32_t dp_tx_skb_mem_max;
88 int32_t dp_rx_skb_mem_max;
89 qdf_atomic_t dp_tx_skb_count;
90 int32_t dp_tx_skb_count_max;
91 qdf_atomic_t dp_rx_skb_count;
92 int32_t dp_rx_skb_count_max;
93 qdf_atomic_t tx_descs_outstanding;
94 int32_t tx_descs_max;
95 } qdf_mem_stat;
96
97 #ifdef MEMORY_DEBUG
98 #include "qdf_debug_domain.h"
99
100 enum list_type {
101 LIST_TYPE_MEM = 0,
102 LIST_TYPE_DMA = 1,
103 LIST_TYPE_NBUF = 2,
104 LIST_TYPE_MAX,
105 };
106
107 /**
108 * struct major_alloc_priv - private data registered to debugfs entry
109 * created to list the major allocations
110 * @type: type of the list to be parsed
111 * @threshold: configured by user by overwriting the respective debugfs
112 * sys entry. This is to list the functions which requested
113 * memory/dma allocations more than threshold number of times.
114 */
115 struct major_alloc_priv {
116 enum list_type type;
117 uint32_t threshold;
118 };
119
120 static qdf_list_t qdf_mem_domains[QDF_DEBUG_DOMAIN_COUNT];
121 static qdf_spinlock_t qdf_mem_list_lock;
122
123 static qdf_list_t qdf_mem_dma_domains[QDF_DEBUG_DOMAIN_COUNT];
124 static qdf_spinlock_t qdf_mem_dma_list_lock;
125
126 static inline qdf_list_t *qdf_mem_list_get(enum qdf_debug_domain domain)
127 {
128 return &qdf_mem_domains[domain];
129 }
130
131 static inline qdf_list_t *qdf_mem_dma_list(enum qdf_debug_domain domain)
132 {
133 return &qdf_mem_dma_domains[domain];
134 }
135
136 /**
137 * struct qdf_mem_header - memory object to debug
138 * @node: node to the list
139 * @domain: the active memory domain at time of allocation
140 * @freed: flag set during free, used to detect double frees
141 * Use uint8_t so we can detect corruption
142 * @func: name of the function the allocation was made from
143 * @line: line number of the file the allocation was made from
144 * @size: size of the allocation in bytes
145 * @caller: Caller of the function for which memory is allocated
146 * @header: a known value, used to detect out-of-bounds access
147 * @time: timestamp at which allocation was made
148 */
149 struct qdf_mem_header {
150 qdf_list_node_t node;
151 enum qdf_debug_domain domain;
152 uint8_t freed;
153 char func[QDF_MEM_FUNC_NAME_SIZE];
154 uint32_t line;
155 uint32_t size;
156 void *caller;
157 uint64_t header;
158 uint64_t time;
159 };
160
161 /* align the qdf_mem_header to 8 bytes */
162 #define QDF_DMA_MEM_HEADER_ALIGN 8
163
164 static uint64_t WLAN_MEM_HEADER = 0x6162636465666768;
165 static uint64_t WLAN_MEM_TRAILER = 0x8081828384858687;
166
167 static inline struct qdf_mem_header *qdf_mem_get_header(void *ptr)
168 {
169 return (struct qdf_mem_header *)ptr - 1;
170 }
171
172 /* make sure the header pointer is 8-byte aligned */
173 static inline struct qdf_mem_header *qdf_mem_dma_get_header(void *ptr,
174 qdf_size_t size)
175 {
176 return (struct qdf_mem_header *)
177 qdf_roundup((size_t)((uint8_t *)ptr + size),
178 QDF_DMA_MEM_HEADER_ALIGN);
179 }
180
181 static inline uint64_t *qdf_mem_get_trailer(struct qdf_mem_header *header)
182 {
183 return (uint64_t *)((void *)(header + 1) + header->size);
184 }
185
186 static inline void *qdf_mem_get_ptr(struct qdf_mem_header *header)
187 {
188 return (void *)(header + 1);
189 }
190
191 /* number of bytes needed for the qdf memory debug information */
192 #define QDF_MEM_DEBUG_SIZE \
193 (sizeof(struct qdf_mem_header) + sizeof(WLAN_MEM_TRAILER))
194
195 /* number of bytes needed for the qdf dma memory debug information */
196 #define QDF_DMA_MEM_DEBUG_SIZE \
197 (sizeof(struct qdf_mem_header) + QDF_DMA_MEM_HEADER_ALIGN)
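/*
 * Debug allocation layout, as implied by the helpers above:
 *
 *   normal alloc: [ struct qdf_mem_header | user buffer | WLAN_MEM_TRAILER ]
 *   dma alloc:    [ user buffer | pad to 8-byte boundary | struct qdf_mem_header ]
 *
 * qdf_mem_get_ptr()/qdf_mem_get_header() convert between the user pointer
 * and the in-band header of a normal allocation, while
 * qdf_mem_dma_get_header() locates the header appended after a dma buffer.
 */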
198
199 static void qdf_mem_trailer_init(struct qdf_mem_header *header)
200 {
201 QDF_BUG(header);
202 if (!header)
203 return;
204 *qdf_mem_get_trailer(header) = WLAN_MEM_TRAILER;
205 }
206
207 static void qdf_mem_header_init(struct qdf_mem_header *header, qdf_size_t size,
208 const char *func, uint32_t line, void *caller)
209 {
210 QDF_BUG(header);
211 if (!header)
212 return;
213
214 header->domain = qdf_debug_domain_get();
215 header->freed = false;
216
217 qdf_str_lcopy(header->func, func, QDF_MEM_FUNC_NAME_SIZE);
218
219 header->line = line;
220 header->size = size;
221 header->caller = caller;
222 header->header = WLAN_MEM_HEADER;
223 header->time = qdf_get_log_timestamp();
224 }
225
226 enum qdf_mem_validation_bitmap {
227 QDF_MEM_BAD_HEADER = 1 << 0,
228 QDF_MEM_BAD_TRAILER = 1 << 1,
229 QDF_MEM_BAD_SIZE = 1 << 2,
230 QDF_MEM_DOUBLE_FREE = 1 << 3,
231 QDF_MEM_BAD_FREED = 1 << 4,
232 QDF_MEM_BAD_NODE = 1 << 5,
233 QDF_MEM_BAD_DOMAIN = 1 << 6,
234 QDF_MEM_WRONG_DOMAIN = 1 << 7,
235 };
236
237 static enum qdf_mem_validation_bitmap
238 qdf_mem_trailer_validate(struct qdf_mem_header *header)
239 {
240 enum qdf_mem_validation_bitmap error_bitmap = 0;
241
242 if (*qdf_mem_get_trailer(header) != WLAN_MEM_TRAILER)
243 error_bitmap |= QDF_MEM_BAD_TRAILER;
244 return error_bitmap;
245 }
246
247 static enum qdf_mem_validation_bitmap
248 qdf_mem_header_validate(struct qdf_mem_header *header,
249 enum qdf_debug_domain domain)
250 {
251 enum qdf_mem_validation_bitmap error_bitmap = 0;
252
253 if (header->header != WLAN_MEM_HEADER)
254 error_bitmap |= QDF_MEM_BAD_HEADER;
255
256 if (header->size > QDF_MEM_MAX_MALLOC)
257 error_bitmap |= QDF_MEM_BAD_SIZE;
258
259 if (header->freed == true)
260 error_bitmap |= QDF_MEM_DOUBLE_FREE;
261 else if (header->freed)
262 error_bitmap |= QDF_MEM_BAD_FREED;
263
264 if (!qdf_list_node_in_any_list(&header->node))
265 error_bitmap |= QDF_MEM_BAD_NODE;
266
267 if (header->domain < QDF_DEBUG_DOMAIN_INIT ||
268 header->domain >= QDF_DEBUG_DOMAIN_COUNT)
269 error_bitmap |= QDF_MEM_BAD_DOMAIN;
270 else if (header->domain != domain)
271 error_bitmap |= QDF_MEM_WRONG_DOMAIN;
272
273 return error_bitmap;
274 }
275
276 static void
277 qdf_mem_header_assert_valid(struct qdf_mem_header *header,
278 enum qdf_debug_domain current_domain,
279 enum qdf_mem_validation_bitmap error_bitmap,
280 const char *func,
281 uint32_t line)
282 {
283 if (!error_bitmap)
284 return;
285
286 if (error_bitmap & QDF_MEM_BAD_HEADER)
287 qdf_err("Corrupted memory header 0x%llx (expected 0x%llx)",
288 header->header, WLAN_MEM_HEADER);
289
290 if (error_bitmap & QDF_MEM_BAD_SIZE)
291 qdf_err("Corrupted memory size %u (expected < %d)",
292 header->size, QDF_MEM_MAX_MALLOC);
293
294 if (error_bitmap & QDF_MEM_BAD_TRAILER)
295 qdf_err("Corrupted memory trailer 0x%llx (expected 0x%llx)",
296 *qdf_mem_get_trailer(header), WLAN_MEM_TRAILER);
297
298 if (error_bitmap & QDF_MEM_DOUBLE_FREE)
299 qdf_err("Memory has previously been freed");
300
301 if (error_bitmap & QDF_MEM_BAD_FREED)
302 qdf_err("Corrupted memory freed flag 0x%x", header->freed);
303
304 if (error_bitmap & QDF_MEM_BAD_NODE)
305 qdf_err("Corrupted memory header node or double free");
306
307 if (error_bitmap & QDF_MEM_BAD_DOMAIN)
308 qdf_err("Corrupted memory domain 0x%x", header->domain);
309
310 if (error_bitmap & QDF_MEM_WRONG_DOMAIN)
311 qdf_err("Memory domain mismatch; allocated:%s(%d), current:%s(%d)",
312 qdf_debug_domain_name(header->domain), header->domain,
313 qdf_debug_domain_name(current_domain), current_domain);
314
315 QDF_MEMDEBUG_PANIC("Fatal memory error detected @ %s:%d", func, line);
316 }
317
318 /**
319 * struct __qdf_mem_info - memory statistics
320 * @func: the function which allocated memory
321 * @line: the line at which allocation happened
322 * @size: the size of allocation
323 * @caller: Address of the caller function
324 * @count: how many allocations of same type
325 * @time: timestamp at which allocation happened
326 */
327 struct __qdf_mem_info {
328 char func[QDF_MEM_FUNC_NAME_SIZE];
329 uint32_t line;
330 uint32_t size;
331 void *caller;
332 uint32_t count;
333 uint64_t time;
334 };
335
336 /*
337 * The table depth defines the de-duplication proximity scope.
338 * A deeper table takes more time, so choose an optimal value.
339 */
340 #define QDF_MEM_STAT_TABLE_SIZE 8
341
342 /**
343 * qdf_mem_debug_print_header() - memory debug header print logic
344 * @print: the print adapter function
345 * @print_priv: the private data to be consumed by @print
346 * @threshold: the threshold value set by user to list top allocations
347 *
348 * Return: None
349 */
350 static void qdf_mem_debug_print_header(qdf_abstract_print print,
351 void *print_priv,
352 uint32_t threshold)
353 {
354 if (threshold)
355 print(print_priv, "APIs requested allocations >= %u no of time",
356 threshold);
357 print(print_priv,
358 "--------------------------------------------------------------");
359 print(print_priv,
360 " count size total filename caller timestamp");
361 print(print_priv,
362 "--------------------------------------------------------------");
363 }
364
365 /**
366 * qdf_mem_meta_table_insert() - insert memory metadata into the given table
367 * @table: the memory metadata table to insert into
368 * @meta: the memory metadata to insert
369 *
370 * Return: true if the table is full after inserting, false otherwise
371 */
372 static bool qdf_mem_meta_table_insert(struct __qdf_mem_info *table,
373 struct qdf_mem_header *meta)
374 {
375 int i;
376
377 for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
378 if (!table[i].count) {
379 qdf_str_lcopy(table[i].func, meta->func,
380 QDF_MEM_FUNC_NAME_SIZE);
381 table[i].line = meta->line;
382 table[i].size = meta->size;
383 table[i].count = 1;
384 table[i].caller = meta->caller;
385 table[i].time = meta->time;
386 break;
387 }
388
389 if (qdf_str_eq(table[i].func, meta->func) &&
390 table[i].line == meta->line &&
391 table[i].size == meta->size &&
392 table[i].caller == meta->caller) {
393 table[i].count++;
394 break;
395 }
396 }
397
398 /* return true if the table is now full */
399 return i >= QDF_MEM_STAT_TABLE_SIZE - 1;
400 }
401
402 /**
403 * qdf_mem_domain_print() - output agnostic memory domain print logic
404 * @domain: the memory domain to print
405 * @print: the print adapter function
406 * @print_priv: the private data to be consumed by @print
407 * @threshold: the threshold value set by user to list top allocations
408 * @mem_print: pointer to function which prints the memory allocation data
409 *
410 * Return: None
411 */
412 static void qdf_mem_domain_print(qdf_list_t *domain,
413 qdf_abstract_print print,
414 void *print_priv,
415 uint32_t threshold,
416 void (*mem_print)(struct __qdf_mem_info *,
417 qdf_abstract_print,
418 void *, uint32_t))
419 {
420 QDF_STATUS status;
421 struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
422 qdf_list_node_t *node;
423
424 qdf_mem_zero(table, sizeof(table));
425 qdf_mem_debug_print_header(print, print_priv, threshold);
426
427 /* hold lock while inserting to avoid use-after-free of the metadata */
428 qdf_spin_lock(&qdf_mem_list_lock);
429 status = qdf_list_peek_front(domain, &node);
430 while (QDF_IS_STATUS_SUCCESS(status)) {
431 struct qdf_mem_header *meta = (struct qdf_mem_header *)node;
432 bool is_full = qdf_mem_meta_table_insert(table, meta);
433
434 qdf_spin_unlock(&qdf_mem_list_lock);
435
436 if (is_full) {
437 (*mem_print)(table, print, print_priv, threshold);
438 qdf_mem_zero(table, sizeof(table));
439 }
440
441 qdf_spin_lock(&qdf_mem_list_lock);
442 status = qdf_list_peek_next(domain, node, &node);
443 }
444 qdf_spin_unlock(&qdf_mem_list_lock);
445
446 (*mem_print)(table, print, print_priv, threshold);
447 }
448
449 /**
450 * qdf_mem_meta_table_print() - memory metadata table print logic
451 * @table: the memory metadata table to print
452 * @print: the print adapter function
453 * @print_priv: the private data to be consumed by @print
454 * @threshold: the threshold value set by user to list top allocations
455 *
456 * Return: None
457 */
458 static void qdf_mem_meta_table_print(struct __qdf_mem_info *table,
459 qdf_abstract_print print,
460 void *print_priv,
461 uint32_t threshold)
462 {
463 int i;
464 char debug_str[QDF_DEBUG_STRING_SIZE];
465 size_t len = 0;
466 char *debug_prefix = "WLAN_BUG_RCA: memory leak detected";
467
468 len += qdf_scnprintf(debug_str, sizeof(debug_str) - len,
469 "%s", debug_prefix);
470
471 for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
472 if (!table[i].count)
473 break;
474
475 print(print_priv,
476 "%6u x %5u = %7uB @ %s:%u %pS %llu",
477 table[i].count,
478 table[i].size,
479 table[i].count * table[i].size,
480 table[i].func,
481 table[i].line, table[i].caller,
482 table[i].time);
483 len += qdf_scnprintf(debug_str + len,
484 sizeof(debug_str) - len,
485 " @ %s:%u %pS",
486 table[i].func,
487 table[i].line,
488 table[i].caller);
489 }
490 print(print_priv, "%s", debug_str);
491 }
492
493 static int qdf_err_printer(void *priv, const char *fmt, ...)
494 {
495 va_list args;
496
497 va_start(args, fmt);
498 QDF_VTRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, (char *)fmt, args);
499 va_end(args);
500
501 return 0;
502 }
503
504 #endif /* MEMORY_DEBUG */
505
506 bool prealloc_disabled = 1;
507 qdf_declare_param(prealloc_disabled, bool);
508 qdf_export_symbol(prealloc_disabled);
509
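/**
 * qdf_mem_malloc_flags() - pick GFP flags for the current calling context
 *
 * Return: GFP_ATOMIC when called from interrupt, non-preemptible or RCU
 *         read-side context; GFP_KERNEL otherwise
 */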
510 int qdf_mem_malloc_flags(void)
511 {
512 if (in_interrupt() || !preemptible() || rcu_preempt_depth())
513 return GFP_ATOMIC;
514
515 return GFP_KERNEL;
516 }
517
518 qdf_export_symbol(qdf_mem_malloc_flags);
519
520 bool qdf_prealloc_disabled_config_get(void)
521 {
522 return prealloc_disabled;
523 }
524
525 qdf_export_symbol(qdf_prealloc_disabled_config_get);
526
527 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
528 QDF_STATUS qdf_prealloc_disabled_config_set(const char *str_value)
529 {
530 QDF_STATUS status;
531
532 status = qdf_bool_parse(str_value, &prealloc_disabled);
533 return status;
534 }
535 #endif
536
537 #if defined WLAN_DEBUGFS
538
539 /* Debugfs root directory for qdf_mem */
540 static struct dentry *qdf_mem_debugfs_root;
541
542 #ifdef MEMORY_DEBUG
543 static int seq_printf_printer(void *priv, const char *fmt, ...)
544 {
545 struct seq_file *file = priv;
546 va_list args;
547
548 va_start(args, fmt);
549 seq_vprintf(file, fmt, args);
550 seq_puts(file, "\n");
551 va_end(args);
552
553 return 0;
554 }
555
556 /**
557 * qdf_print_major_alloc() - memory metadata table print logic
558 * @table: the memory metadata table to print
559 * @print: the print adapter function
560 * @print_priv: the private data to be consumed by @print
561 * @threshold: the threshold value set by user to list top allocations
562 *
563 * Return: None
564 */
565 static void qdf_print_major_alloc(struct __qdf_mem_info *table,
566 qdf_abstract_print print,
567 void *print_priv,
568 uint32_t threshold)
569 {
570 int i;
571
572 for (i = 0; i < QDF_MEM_STAT_TABLE_SIZE; i++) {
573 if (!table[i].count)
574 break;
575 if (table[i].count >= threshold)
576 print(print_priv,
577 "%6u x %5u = %7uB @ %s:%u %pS %llu",
578 table[i].count,
579 table[i].size,
580 table[i].count * table[i].size,
581 table[i].func,
582 table[i].line, table[i].caller,
583 table[i].time);
584 }
585 }
586
587 /**
588 * qdf_mem_seq_start() - sequential callback to start
589 * @seq: seq_file handle
590 * @pos: The start position of the sequence
591 *
592 * Return: iterator pointer, or NULL if iteration is complete
593 */
594 static void *qdf_mem_seq_start(struct seq_file *seq, loff_t *pos)
595 {
596 enum qdf_debug_domain domain = *pos;
597
598 if (!qdf_debug_domain_valid(domain))
599 return NULL;
600
601 /* just use the current position as our iterator */
602 return pos;
603 }
604
605 /**
606 * qdf_mem_seq_next() - next sequential callback
607 * @seq: seq_file handle
608 * @v: the current iterator
609 * @pos: the current position
610 *
611 * Get the next node and release previous node.
612 *
613 * Return: iterator pointer, or NULL if iteration is complete
614 */
615 static void *qdf_mem_seq_next(struct seq_file *seq, void *v, loff_t *pos)
616 {
617 ++*pos;
618
619 return qdf_mem_seq_start(seq, pos);
620 }
621
622 /**
623 * qdf_mem_seq_stop() - stop sequential callback
624 * @seq: seq_file handle
625 * @v: current iterator
626 *
627 * Return: None
628 */
629 static void qdf_mem_seq_stop(struct seq_file *seq, void *v) { }
630
631 /**
632 * qdf_mem_seq_show() - print sequential callback
633 * @seq: seq_file handle
634 * @v: current iterator
635 *
636 * Return: 0 - success
637 */
638 static int qdf_mem_seq_show(struct seq_file *seq, void *v)
639 {
640 enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
641
642 seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
643 qdf_debug_domain_name(domain_id), domain_id);
644 qdf_mem_domain_print(qdf_mem_list_get(domain_id),
645 seq_printf_printer,
646 seq,
647 0,
648 qdf_mem_meta_table_print);
649
650 return 0;
651 }
652
653 /* sequential file operation table */
654 static const struct seq_operations qdf_mem_seq_ops = {
655 .start = qdf_mem_seq_start,
656 .next = qdf_mem_seq_next,
657 .stop = qdf_mem_seq_stop,
658 .show = qdf_mem_seq_show,
659 };
660
661
662 static int qdf_mem_debugfs_open(struct inode *inode, struct file *file)
663 {
664 return seq_open(file, &qdf_mem_seq_ops);
665 }
666
667 /**
668 * qdf_major_alloc_show() - print sequential callback
669 * @seq: seq_file handle
670 * @v: current iterator
671 *
672 * Return: 0 - success
673 */
674 static int qdf_major_alloc_show(struct seq_file *seq, void *v)
675 {
676 enum qdf_debug_domain domain_id = *(enum qdf_debug_domain *)v;
677 struct major_alloc_priv *priv;
678 qdf_list_t *list;
679
680 priv = (struct major_alloc_priv *)seq->private;
681 seq_printf(seq, "\n%s Memory Domain (Id %d)\n",
682 qdf_debug_domain_name(domain_id), domain_id);
683
684 switch (priv->type) {
685 case LIST_TYPE_MEM:
686 list = qdf_mem_list_get(domain_id);
687 break;
688 case LIST_TYPE_DMA:
689 list = qdf_mem_dma_list(domain_id);
690 break;
691 default:
692 list = NULL;
693 break;
694 }
695
696 if (list)
697 qdf_mem_domain_print(list,
698 seq_printf_printer,
699 seq,
700 priv->threshold,
701 qdf_print_major_alloc);
702
703 return 0;
704 }
705
706 /* sequential file operation table created to track major allocs */
707 static const struct seq_operations qdf_major_allocs_seq_ops = {
708 .start = qdf_mem_seq_start,
709 .next = qdf_mem_seq_next,
710 .stop = qdf_mem_seq_stop,
711 .show = qdf_major_alloc_show,
712 };
713
714 static int qdf_major_allocs_open(struct inode *inode, struct file *file)
715 {
716 void *private = inode->i_private;
717 struct seq_file *seq;
718 int rc;
719
720 rc = seq_open(file, &qdf_major_allocs_seq_ops);
721 if (rc == 0) {
722 seq = file->private_data;
723 seq->private = private;
724 }
725 return rc;
726 }
727
728 static ssize_t qdf_major_alloc_set_threshold(struct file *file,
729 const char __user *user_buf,
730 size_t count,
731 loff_t *pos)
732 {
733 char buf[32];
734 ssize_t buf_size;
735 uint32_t threshold;
736 struct seq_file *seq = file->private_data;
737 struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
738
739 buf_size = min(count, (sizeof(buf) - 1));
740 if (buf_size <= 0)
741 return 0;
742 if (copy_from_user(buf, user_buf, buf_size))
743 return -EFAULT;
744 buf[buf_size] = '\0';
745 if (!kstrtou32(buf, 10, &threshold))
746 priv->threshold = threshold;
747 return buf_size;
748 }
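/*
 * Illustrative usage (the exact path depends on where debugfs is mounted
 * and on the qdf debugfs root name):
 *
 *   echo 100 > <debugfs>/<qdf root>/mem/major_mem_allocs
 *   cat <debugfs>/<qdf root>/mem/major_mem_allocs
 *
 * lists only the call sites with at least 100 outstanding allocations.
 */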
749
750 /**
751 * qdf_print_major_nbuf_allocs() - output agnostic nbuf print logic
752 * @threshold: the threshold value set by user to list top allocations
753 * @print: the print adapter function
754 * @print_priv: the private data to be consumed by @print
755 * @mem_print: pointer to function which prints the memory allocation data
756 *
757 * Return: None
758 */
759 static void
760 qdf_print_major_nbuf_allocs(uint32_t threshold,
761 qdf_abstract_print print,
762 void *print_priv,
763 void (*mem_print)(struct __qdf_mem_info *,
764 qdf_abstract_print,
765 void *, uint32_t))
766 {
767 uint32_t nbuf_iter;
768 unsigned long irq_flag = 0;
769 QDF_NBUF_TRACK *p_node;
770 struct __qdf_mem_info table[QDF_MEM_STAT_TABLE_SIZE];
771 struct qdf_mem_header meta;
772 bool is_full;
773
774 qdf_mem_zero(table, sizeof(table));
775 qdf_mem_debug_print_header(print, print_priv, threshold);
776
777 if (is_initial_mem_debug_disabled)
778 return;
779
780 qdf_rl_info("major nbuf print with threshold %u", threshold);
781
782 for (nbuf_iter = 0; nbuf_iter < QDF_NET_BUF_TRACK_MAX_SIZE;
783 nbuf_iter++) {
784 qdf_nbuf_acquire_track_lock(nbuf_iter, irq_flag);
785 p_node = qdf_nbuf_get_track_tbl(nbuf_iter);
786 while (p_node) {
787 meta.line = p_node->line_num;
788 meta.size = p_node->size;
789 meta.caller = NULL;
790 meta.time = p_node->time;
791 qdf_str_lcopy(meta.func, p_node->func_name,
792 QDF_MEM_FUNC_NAME_SIZE);
793
794 is_full = qdf_mem_meta_table_insert(table, &meta);
795
796 if (is_full) {
797 (*mem_print)(table, print,
798 print_priv, threshold);
799 qdf_mem_zero(table, sizeof(table));
800 }
801
802 p_node = p_node->p_next;
803 }
804 qdf_nbuf_release_track_lock(nbuf_iter, irq_flag);
805 }
806
807 (*mem_print)(table, print, print_priv, threshold);
808
809 qdf_rl_info("major nbuf print end");
810 }
811
812 /**
813 * qdf_major_nbuf_alloc_show() - print sequential callback
814 * @seq: seq_file handle
815 * @v: current iterator
816 *
817 * Return: 0 - success
818 */
819 static int qdf_major_nbuf_alloc_show(struct seq_file *seq, void *v)
820 {
821 struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
822
823 if (!priv) {
824 qdf_err("priv is null");
825 return -EINVAL;
826 }
827
828 qdf_print_major_nbuf_allocs(priv->threshold,
829 seq_printf_printer,
830 seq,
831 qdf_print_major_alloc);
832
833 return 0;
834 }
835
836 /**
837 * qdf_nbuf_seq_start() - sequential callback to start
838 * @seq: seq_file handle
839 * @pos: The start position of the sequence
840 *
841 * Return: iterator pointer, or NULL if iteration is complete
842 */
843 static void *qdf_nbuf_seq_start(struct seq_file *seq, loff_t *pos)
844 {
845 enum qdf_debug_domain domain = *pos;
846
847 if (domain > QDF_DEBUG_NBUF_DOMAIN)
848 return NULL;
849
850 return pos;
851 }
852
853 /**
854 * qdf_nbuf_seq_next() - next sequential callback
855 * @seq: seq_file handle
856 * @v: the current iterator
857 * @pos: the current position
858 *
859 * Get the next node and release previous node.
860 *
861 * Return: iterator pointer, or NULL if iteration is complete
862 */
863 static void *qdf_nbuf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
864 {
865 ++*pos;
866
867 return qdf_nbuf_seq_start(seq, pos);
868 }
869
870 /**
871 * qdf_nbuf_seq_stop() - stop sequential callback
872 * @seq: seq_file handle
873 * @v: current iterator
874 *
875 * Return: None
876 */
877 static void qdf_nbuf_seq_stop(struct seq_file *seq, void *v) { }
878
879 /* sequential file operation table created to track major skb allocs */
880 static const struct seq_operations qdf_major_nbuf_allocs_seq_ops = {
881 .start = qdf_nbuf_seq_start,
882 .next = qdf_nbuf_seq_next,
883 .stop = qdf_nbuf_seq_stop,
884 .show = qdf_major_nbuf_alloc_show,
885 };
886
887 static int qdf_major_nbuf_allocs_open(struct inode *inode, struct file *file)
888 {
889 void *private = inode->i_private;
890 struct seq_file *seq;
891 int rc;
892
893 rc = seq_open(file, &qdf_major_nbuf_allocs_seq_ops);
894 if (rc == 0) {
895 seq = file->private_data;
896 seq->private = private;
897 }
898 return rc;
899 }
900
901 static ssize_t qdf_major_nbuf_alloc_set_threshold(struct file *file,
902 const char __user *user_buf,
903 size_t count,
904 loff_t *pos)
905 {
906 char buf[32];
907 ssize_t buf_size;
908 uint32_t threshold;
909 struct seq_file *seq = file->private_data;
910 struct major_alloc_priv *priv = (struct major_alloc_priv *)seq->private;
911
912 buf_size = min(count, (sizeof(buf) - 1));
913 if (buf_size <= 0)
914 return 0;
915 if (copy_from_user(buf, user_buf, buf_size))
916 return -EFAULT;
917 buf[buf_size] = '\0';
918 if (!kstrtou32(buf, 10, &threshold))
919 priv->threshold = threshold;
920 return buf_size;
921 }
922
923 /* file operation table for listing major allocs */
924 static const struct file_operations fops_qdf_major_allocs = {
925 .owner = THIS_MODULE,
926 .open = qdf_major_allocs_open,
927 .read = seq_read,
928 .llseek = seq_lseek,
929 .release = seq_release,
930 .write = qdf_major_alloc_set_threshold,
931 };
932
933 /* debugfs file operation table */
934 static const struct file_operations fops_qdf_mem_debugfs = {
935 .owner = THIS_MODULE,
936 .open = qdf_mem_debugfs_open,
937 .read = seq_read,
938 .llseek = seq_lseek,
939 .release = seq_release,
940 };
941
942 /* file operation table for listing major allocs */
943 static const struct file_operations fops_qdf_nbuf_major_allocs = {
944 .owner = THIS_MODULE,
945 .open = qdf_major_nbuf_allocs_open,
946 .read = seq_read,
947 .llseek = seq_lseek,
948 .release = seq_release,
949 .write = qdf_major_nbuf_alloc_set_threshold,
950 };
951
952 static struct major_alloc_priv mem_priv = {
953 /* List type set to mem */
954 LIST_TYPE_MEM,
955 /* initial threshold to list APIs which allocates mem >= 50 times */
956 50
957 };
958
959 static struct major_alloc_priv dma_priv = {
960 /* List type set to DMA */
961 LIST_TYPE_DMA,
962 /* initial threshold to list APIs which allocates dma >= 50 times */
963 50
964 };
965
966 static struct major_alloc_priv nbuf_priv = {
967 /* List type set to NBUF */
968 LIST_TYPE_NBUF,
969 /* initial threshold to list APIs which allocates nbuf >= 50 times */
970 50
971 };
972
973 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
974 {
975 if (is_initial_mem_debug_disabled)
976 return QDF_STATUS_SUCCESS;
977
978 if (!qdf_mem_debugfs_root)
979 return QDF_STATUS_E_FAILURE;
980
981 debugfs_create_file("list",
982 S_IRUSR,
983 qdf_mem_debugfs_root,
984 NULL,
985 &fops_qdf_mem_debugfs);
986
987 debugfs_create_file("major_mem_allocs",
988 0600,
989 qdf_mem_debugfs_root,
990 &mem_priv,
991 &fops_qdf_major_allocs);
992
993 debugfs_create_file("major_dma_allocs",
994 0600,
995 qdf_mem_debugfs_root,
996 &dma_priv,
997 &fops_qdf_major_allocs);
998
999 debugfs_create_file("major_nbuf_allocs",
1000 0600,
1001 qdf_mem_debugfs_root,
1002 &nbuf_priv,
1003 &fops_qdf_nbuf_major_allocs);
1004
1005 return QDF_STATUS_SUCCESS;
1006 }
1007
1008 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1009 {
1010 return QDF_STATUS_SUCCESS;
1011 }
1012
1013 #else /* MEMORY_DEBUG */
1014
1015 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1016 {
1017 return QDF_STATUS_E_NOSUPPORT;
1018 }
1019
1020 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1021 {
1022 return QDF_STATUS_E_NOSUPPORT;
1023 }
1024
1025 #endif /* MEMORY_DEBUG */
1026
1027
1028 static void qdf_mem_debugfs_exit(void)
1029 {
1030 debugfs_remove_recursive(qdf_mem_debugfs_root);
1031 qdf_mem_debugfs_root = NULL;
1032 }
1033
1034 static QDF_STATUS qdf_mem_debugfs_init(void)
1035 {
1036 struct dentry *qdf_debugfs_root = qdf_debugfs_get_root();
1037
1038 if (!qdf_debugfs_root)
1039 return QDF_STATUS_E_FAILURE;
1040
1041 qdf_mem_debugfs_root = debugfs_create_dir("mem", qdf_debugfs_root);
1042
1043 if (!qdf_mem_debugfs_root)
1044 return QDF_STATUS_E_FAILURE;
1045
1046
1047 debugfs_create_atomic_t("kmalloc",
1048 S_IRUSR,
1049 qdf_mem_debugfs_root,
1050 &qdf_mem_stat.kmalloc);
1051
1052 debugfs_create_atomic_t("dma",
1053 S_IRUSR,
1054 qdf_mem_debugfs_root,
1055 &qdf_mem_stat.dma);
1056
1057 debugfs_create_atomic_t("skb",
1058 S_IRUSR,
1059 qdf_mem_debugfs_root,
1060 &qdf_mem_stat.skb);
1061
1062 return QDF_STATUS_SUCCESS;
1063 }
1064
1065 #else /* WLAN_DEBUGFS */
1066
1067 static QDF_STATUS qdf_mem_debugfs_init(void)
1068 {
1069 return QDF_STATUS_E_NOSUPPORT;
1070 }
1071 static void qdf_mem_debugfs_exit(void) {}
1072
1073
1074 static QDF_STATUS qdf_mem_debug_debugfs_init(void)
1075 {
1076 return QDF_STATUS_E_NOSUPPORT;
1077 }
1078
1079 static QDF_STATUS qdf_mem_debug_debugfs_exit(void)
1080 {
1081 return QDF_STATUS_E_NOSUPPORT;
1082 }
1083
1084 #endif /* WLAN_DEBUGFS */
1085
1086 void qdf_mem_kmalloc_inc(qdf_size_t size)
1087 {
1088 qdf_atomic_add(size, &qdf_mem_stat.kmalloc);
1089 }
1090
1091 static void qdf_mem_dma_inc(qdf_size_t size)
1092 {
1093 qdf_atomic_add(size, &qdf_mem_stat.dma);
1094 }
1095
1096 #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
1097 void qdf_mem_skb_inc(qdf_size_t size)
1098 {
1099 qdf_atomic_add(size, &qdf_mem_stat.skb);
1100 }
1101
1102 void qdf_mem_skb_dec(qdf_size_t size)
1103 {
1104 qdf_atomic_sub(size, &qdf_mem_stat.skb);
1105 }
1106
1107 void qdf_mem_skb_total_inc(qdf_size_t size)
1108 {
1109 int32_t skb_mem_max = 0;
1110
1111 qdf_atomic_add(size, &qdf_mem_stat.skb_total);
1112 skb_mem_max = qdf_atomic_read(&qdf_mem_stat.skb_total);
1113 if (qdf_mem_stat.skb_mem_max < skb_mem_max)
1114 qdf_mem_stat.skb_mem_max = skb_mem_max;
1115 }
1116
1117 void qdf_mem_skb_total_dec(qdf_size_t size)
1118 {
1119 qdf_atomic_sub(size, &qdf_mem_stat.skb_total);
1120 }
1121
1122 void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
1123 {
1124 int32_t curr_dp_tx_skb_mem_max = 0;
1125
1126 qdf_atomic_add(size, &qdf_mem_stat.dp_tx_skb);
1127 curr_dp_tx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
1128 if (qdf_mem_stat.dp_tx_skb_mem_max < curr_dp_tx_skb_mem_max)
1129 qdf_mem_stat.dp_tx_skb_mem_max = curr_dp_tx_skb_mem_max;
1130 }
1131
1132 void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
1133 {
1134 qdf_atomic_sub(size, &qdf_mem_stat.dp_tx_skb);
1135 }
1136
1137 void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
1138 {
1139 int32_t curr_dp_rx_skb_mem_max = 0;
1140
1141 qdf_atomic_add(size, &qdf_mem_stat.dp_rx_skb);
1142 curr_dp_rx_skb_mem_max = qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
1143 if (qdf_mem_stat.dp_rx_skb_mem_max < curr_dp_rx_skb_mem_max)
1144 qdf_mem_stat.dp_rx_skb_mem_max = curr_dp_rx_skb_mem_max;
1145 }
1146
1147 void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
1148 {
1149 qdf_atomic_sub(size, &qdf_mem_stat.dp_rx_skb);
1150 }
1151
1152 void qdf_mem_dp_tx_skb_cnt_inc(void)
1153 {
1154 int32_t curr_dp_tx_skb_count_max = 0;
1155
1156 qdf_atomic_add(1, &qdf_mem_stat.dp_tx_skb_count);
1157 curr_dp_tx_skb_count_max =
1158 qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
1159 if (qdf_mem_stat.dp_tx_skb_count_max < curr_dp_tx_skb_count_max)
1160 qdf_mem_stat.dp_tx_skb_count_max = curr_dp_tx_skb_count_max;
1161 }
1162
1163 void qdf_mem_dp_tx_skb_cnt_dec(void)
1164 {
1165 qdf_atomic_sub(1, &qdf_mem_stat.dp_tx_skb_count);
1166 }
1167
1168 void qdf_mem_dp_rx_skb_cnt_inc(void)
1169 {
1170 int32_t curr_dp_rx_skb_count_max = 0;
1171
1172 qdf_atomic_add(1, &qdf_mem_stat.dp_rx_skb_count);
1173 curr_dp_rx_skb_count_max =
1174 qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
1175 if (qdf_mem_stat.dp_rx_skb_count_max < curr_dp_rx_skb_count_max)
1176 qdf_mem_stat.dp_rx_skb_count_max = curr_dp_rx_skb_count_max;
1177 }
1178
1179 void qdf_mem_dp_rx_skb_cnt_dec(void)
1180 {
1181 qdf_atomic_sub(1, &qdf_mem_stat.dp_rx_skb_count);
1182 }
1183 #endif
1184
1185 void qdf_mem_kmalloc_dec(qdf_size_t size)
1186 {
1187 qdf_atomic_sub(size, &qdf_mem_stat.kmalloc);
1188 }
1189
1190 static inline void qdf_mem_dma_dec(qdf_size_t size)
1191 {
1192 qdf_atomic_sub(size, &qdf_mem_stat.dma);
1193 }
1194
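/**
 * __qdf_mempool_init() - create a fixed-size element pool
 * @osdev: platform device handle that tracks the pool slots
 * @pool_addr: out parameter for the newly created pool context
 * @elem_cnt: number of elements backing the pool
 * @elem_size: size of each element in bytes
 * @flags: pool flags, currently stored as-is
 *
 * When prealloc_disabled is set only a context is created and allocations
 * fall back to qdf_mem_malloc(); otherwise the backing memory is allocated
 * up front and chained onto a free list.
 *
 * Return: 0 on success, an error value on allocation failure
 */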
1195 int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
1196 int elem_cnt, size_t elem_size, u_int32_t flags)
1197 {
1198 __qdf_mempool_ctxt_t *new_pool = NULL;
1199 u_int32_t align = L1_CACHE_BYTES;
1200 unsigned long aligned_pool_mem;
1201 int pool_id;
1202 int i;
1203
1204 if (prealloc_disabled) {
1205 /* TBD: We can maintain a list of pools in qdf_device_t
1206 * to help debugging
1207 * when pre-allocation is not enabled
1208 */
1209 new_pool = (__qdf_mempool_ctxt_t *)
1210 kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1211 if (!new_pool)
1212 return QDF_STATUS_E_NOMEM;
1213
1214 memset(new_pool, 0, sizeof(*new_pool));
1215 /* TBD: define flags for zeroing buffers etc */
1216 new_pool->flags = flags;
1217 new_pool->elem_size = elem_size;
1218 new_pool->max_elem = elem_cnt;
1219 *pool_addr = new_pool;
1220 return 0;
1221 }
1222
1223 for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
1224 if (!osdev->mem_pool[pool_id])
1225 break;
1226 }
1227
1228 if (pool_id == MAX_MEM_POOLS)
1229 return -ENOMEM;
1230
1231 new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
1232 kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
1233 if (!new_pool)
1234 return -ENOMEM;
1235
1236 memset(new_pool, 0, sizeof(*new_pool));
1237 /* TBD: define flags for zeroing buffers etc */
1238 new_pool->flags = flags;
1239 new_pool->pool_id = pool_id;
1240
1241 /* Round up the element size to cacheline */
1242 new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
1243 new_pool->mem_size = elem_cnt * new_pool->elem_size +
1244 ((align)?(align - 1):0);
1245
1246 new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
1247 if (!new_pool->pool_mem) {
1248 /* TBD: Check if we need get_free_pages above */
1249 kfree(new_pool);
1250 osdev->mem_pool[pool_id] = NULL;
1251 return -ENOMEM;
1252 }
1253
1254 spin_lock_init(&new_pool->lock);
1255
1256 /* Initialize free list */
1257 aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
1258 ((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
1259 STAILQ_INIT(&new_pool->free_list);
1260
1261 for (i = 0; i < elem_cnt; i++)
1262 STAILQ_INSERT_TAIL(&(new_pool->free_list),
1263 (mempool_elem_t *)(aligned_pool_mem +
1264 (new_pool->elem_size * i)), mempool_entry);
1265
1266
1267 new_pool->free_cnt = elem_cnt;
1268 *pool_addr = new_pool;
1269 return 0;
1270 }
1271 qdf_export_symbol(__qdf_mempool_init);
1272
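/**
 * __qdf_mempool_destroy() - release a pool created by __qdf_mempool_init()
 * @osdev: platform device handle owning the pool slot
 * @pool: pool context to free, along with its backing memory when
 *        pre-allocation is in use
 */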
1273 void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
1274 {
1275 int pool_id = 0;
1276
1277 if (!pool)
1278 return;
1279
1280 if (prealloc_disabled) {
1281 kfree(pool);
1282 return;
1283 }
1284
1285 pool_id = pool->pool_id;
1286
1287 /* TBD: Check if free count matches elem_cnt if debug is enabled */
1288 kfree(pool->pool_mem);
1289 kfree(pool);
1290 osdev->mem_pool[pool_id] = NULL;
1291 }
1292 qdf_export_symbol(__qdf_mempool_destroy);
1293
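/**
 * __qdf_mempool_alloc() - take one element from the pool free list
 * @osdev: platform device handle (unused here)
 * @pool: pool to allocate from
 *
 * Falls back to qdf_mem_malloc() when pre-allocation is disabled.
 *
 * Return: element pointer, or NULL if the pool is exhausted
 */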
1294 void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
1295 {
1296 void *buf = NULL;
1297
1298 if (!pool)
1299 return NULL;
1300
1301 if (prealloc_disabled)
1302 return qdf_mem_malloc(pool->elem_size);
1303
1304 spin_lock_bh(&pool->lock);
1305
1306 buf = STAILQ_FIRST(&pool->free_list);
1307 if (buf) {
1308 STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
1309 pool->free_cnt--;
1310 }
1311
1312 /* TBD: Update free count if debug is enabled */
1313 spin_unlock_bh(&pool->lock);
1314
1315 return buf;
1316 }
1317 qdf_export_symbol(__qdf_mempool_alloc);
1318
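/**
 * __qdf_mempool_free() - return an element to the pool free list
 * @osdev: platform device handle (unused here)
 * @pool: pool the element belongs to
 * @buf: element to return; freed with qdf_mem_free() when pre-allocation
 *       is disabled
 */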
1319 void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
1320 {
1321 if (!pool)
1322 return;
1323
1324
1325 if (prealloc_disabled)
1326 return qdf_mem_free(buf);
1327
1328 spin_lock_bh(&pool->lock);
1329 pool->free_cnt++;
1330
1331 STAILQ_INSERT_TAIL
1332 (&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
1333 spin_unlock_bh(&pool->lock);
1334 }
1335 qdf_export_symbol(__qdf_mempool_free);
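/*
 * Illustrative call sequence for the pool helpers above (a sketch only;
 * "struct foo" and the element count are placeholders):
 *
 *   __qdf_mempool_t pool;
 *
 *   if (!__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0)) {
 *           struct foo *f = __qdf_mempool_alloc(osdev, pool);
 *           ...
 *           __qdf_mempool_free(osdev, pool, f);
 *           __qdf_mempool_destroy(osdev, pool);
 *   }
 */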
1336
1337 #ifdef CNSS_MEM_PRE_ALLOC
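/**
 * qdf_might_be_prealloc() - check if @ptr could have come from the
 * pre-allocation pool
 * @ptr: pointer to test
 *
 * Return: true if ksize(ptr) exceeds WCNSS_PRE_ALLOC_GET_THRESHOLD
 */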
1338 static bool qdf_might_be_prealloc(void *ptr)
1339 {
1340 if (ksize(ptr) > WCNSS_PRE_ALLOC_GET_THRESHOLD)
1341 return true;
1342 else
1343 return false;
1344 }
1345
1346 /**
1347 * qdf_mem_prealloc_get() - conditionally pre-allocate memory
1348 * @size: the number of bytes to allocate
1349 *
1350 * If size is greater than WCNSS_PRE_ALLOC_GET_THRESHOLD, this function returns
1351 * a chunk of pre-allocated memory. If size is less than or equal to
1352 * WCNSS_PRE_ALLOC_GET_THRESHOLD, or an error occurs, NULL is returned instead.
1353 *
1354 * Return: NULL on failure, non-NULL on success
1355 */
1356 static void *qdf_mem_prealloc_get(size_t size)
1357 {
1358 void *ptr;
1359
1360 if (size <= WCNSS_PRE_ALLOC_GET_THRESHOLD)
1361 return NULL;
1362
1363 ptr = wcnss_prealloc_get(size);
1364 if (!ptr)
1365 return NULL;
1366
1367 memset(ptr, 0, size);
1368
1369 return ptr;
1370 }
1371
1372 static inline bool qdf_mem_prealloc_put(void *ptr)
1373 {
1374 return wcnss_prealloc_put(ptr);
1375 }
1376 #else
1377 static bool qdf_might_be_prealloc(void *ptr)
1378 {
1379 return false;
1380 }
1381
1382 static inline void *qdf_mem_prealloc_get(size_t size)
1383 {
1384 return NULL;
1385 }
1386
1387 static inline bool qdf_mem_prealloc_put(void *ptr)
1388 {
1389 return false;
1390 }
1391 #endif /* CNSS_MEM_PRE_ALLOC */
1392
1393 /* External Function implementation */
1394 #ifdef MEMORY_DEBUG
1395 #ifdef DISABLE_MEM_DBG_LOAD_CONFIG
1396 bool qdf_mem_debug_config_get(void)
1397 {
1398 /* Return false if DISABLE_MEM_DBG_LOAD_CONFIG flag is enabled */
1399 return false;
1400 }
1401 #else
1402 bool qdf_mem_debug_config_get(void)
1403 {
1404 return mem_debug_disabled;
1405 }
1406 #endif /* DISABLE_MEM_DBG_LOAD_CONFIG */
1407
1408 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
1409 QDF_STATUS qdf_mem_debug_disabled_config_set(const char *str_value)
1410 {
1411 QDF_STATUS status;
1412
1413 status = qdf_bool_parse(str_value, &mem_debug_disabled);
1414 return status;
1415 }
1416 #endif
1417
1418 /**
1419 * qdf_mem_debug_init() - initialize qdf memory debug functionality
1420 *
1421 * Return: none
1422 */
1423 static void qdf_mem_debug_init(void)
1424 {
1425 int i;
1426
1427 is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
1428
1429 if (is_initial_mem_debug_disabled)
1430 return;
1431
1432 /* Initializing the list with maximum size of 60000 */
1433 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1434 qdf_list_create(&qdf_mem_domains[i], 60000);
1435 qdf_spinlock_create(&qdf_mem_list_lock);
1436
1437 /* dma */
1438 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1439 qdf_list_create(&qdf_mem_dma_domains[i], 0);
1440 qdf_spinlock_create(&qdf_mem_dma_list_lock);
1441 }
1442
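/**
 * qdf_mem_domain_check_for_leaks() - report outstanding allocations
 * @domain: debug domain being checked
 * @mem_list: tracking list for that domain
 *
 * Return: number of leaked (still tracked) allocations in @mem_list
 */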
1443 static uint32_t
1444 qdf_mem_domain_check_for_leaks(enum qdf_debug_domain domain,
1445 qdf_list_t *mem_list)
1446 {
1447 if (is_initial_mem_debug_disabled)
1448 return 0;
1449
1450 if (qdf_list_empty(mem_list))
1451 return 0;
1452
1453 qdf_err("Memory leaks detected in %s domain!",
1454 qdf_debug_domain_name(domain));
1455 qdf_mem_domain_print(mem_list,
1456 qdf_err_printer,
1457 NULL,
1458 0,
1459 qdf_mem_meta_table_print);
1460
1461 return mem_list->count;
1462 }
1463
1464 static void qdf_mem_domain_set_check_for_leaks(qdf_list_t *domains)
1465 {
1466 uint32_t leak_count = 0;
1467 int i;
1468
1469 if (is_initial_mem_debug_disabled)
1470 return;
1471
1472 /* detect and print leaks */
1473 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1474 leak_count += qdf_mem_domain_check_for_leaks(i, domains + i);
1475
1476 if (leak_count)
1477 QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1478 leak_count);
1479 }
1480
1481 /**
1482 * qdf_mem_debug_exit() - exit qdf memory debug functionality
1483 *
1484 * Return: none
1485 */
1486 static void qdf_mem_debug_exit(void)
1487 {
1488 int i;
1489
1490 if (is_initial_mem_debug_disabled)
1491 return;
1492
1493 /* mem */
1494 qdf_mem_domain_set_check_for_leaks(qdf_mem_domains);
1495 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1496 qdf_list_destroy(qdf_mem_list_get(i));
1497
1498 qdf_spinlock_destroy(&qdf_mem_list_lock);
1499
1500 /* dma */
1501 qdf_mem_domain_set_check_for_leaks(qdf_mem_dma_domains);
1502 for (i = 0; i < QDF_DEBUG_DOMAIN_COUNT; ++i)
1503 qdf_list_destroy(&qdf_mem_dma_domains[i]);
1504 qdf_spinlock_destroy(&qdf_mem_dma_list_lock);
1505 }
1506
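/**
 * qdf_mem_malloc_debug() - debug-tracked heap allocation
 * @size: number of bytes to allocate (must not exceed QDF_MEM_MAX_MALLOC)
 * @func: name of the calling function, used in leak reports
 * @line: line number of the call site
 * @caller: return address of the caller
 * @flag: GFP flags; 0 selects qdf_mem_malloc_flags() automatically
 *
 * The allocation is padded with a header and trailer pattern and linked
 * into the current debug domain so that leaks and corruption can be
 * detected later.
 *
 * Return: zero-initialized buffer on success, NULL on failure
 */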
1507 void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
1508 void *caller, uint32_t flag)
1509 {
1510 QDF_STATUS status;
1511 enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1512 qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1513 struct qdf_mem_header *header;
1514 void *ptr;
1515 unsigned long start, duration;
1516
1517 if (is_initial_mem_debug_disabled)
1518 return __qdf_mem_malloc(size, func, line);
1519
1520 if (!size || size > QDF_MEM_MAX_MALLOC) {
1521 qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1522 return NULL;
1523 }
1524
1525 ptr = qdf_mem_prealloc_get(size);
1526 if (ptr)
1527 return ptr;
1528
1529 if (!flag)
1530 flag = qdf_mem_malloc_flags();
1531
1532 start = qdf_mc_timer_get_system_time();
1533 header = kzalloc(size + QDF_MEM_DEBUG_SIZE, flag);
1534 duration = qdf_mc_timer_get_system_time() - start;
1535
1536 if (duration > QDF_MEM_WARN_THRESHOLD)
1537 qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1538 duration, size, func, line);
1539
1540 if (!header) {
1541 qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1542 return NULL;
1543 }
1544
1545 qdf_mem_header_init(header, size, func, line, caller);
1546 qdf_mem_trailer_init(header);
1547 ptr = qdf_mem_get_ptr(header);
1548
1549 qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1550 status = qdf_list_insert_front(mem_list, &header->node);
1551 qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1552 if (QDF_IS_STATUS_ERROR(status))
1553 qdf_err("Failed to insert memory header; status %d", status);
1554
1555 qdf_mem_kmalloc_inc(ksize(header));
1556
1557 return ptr;
1558 }
1559 qdf_export_symbol(qdf_mem_malloc_debug);
1560
1561 void *qdf_mem_malloc_atomic_debug(size_t size, const char *func,
1562 uint32_t line, void *caller)
1563 {
1564 QDF_STATUS status;
1565 enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1566 qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1567 struct qdf_mem_header *header;
1568 void *ptr;
1569 unsigned long start, duration;
1570
1571 if (is_initial_mem_debug_disabled)
1572 return qdf_mem_malloc_atomic_debug_fl(size, func, line);
1573
1574 if (!size || size > QDF_MEM_MAX_MALLOC) {
1575 qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
1576 return NULL;
1577 }
1578
1579 ptr = qdf_mem_prealloc_get(size);
1580 if (ptr)
1581 return ptr;
1582
1583 start = qdf_mc_timer_get_system_time();
1584 header = kzalloc(size + QDF_MEM_DEBUG_SIZE, GFP_ATOMIC);
1585 duration = qdf_mc_timer_get_system_time() - start;
1586
1587 if (duration > QDF_MEM_WARN_THRESHOLD)
1588 qdf_warn("Malloc slept; %lums, %zuB @ %s:%d",
1589 duration, size, func, line);
1590
1591 if (!header) {
1592 qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
1593 return NULL;
1594 }
1595
1596 qdf_mem_header_init(header, size, func, line, caller);
1597 qdf_mem_trailer_init(header);
1598 ptr = qdf_mem_get_ptr(header);
1599
1600 qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1601 status = qdf_list_insert_front(mem_list, &header->node);
1602 qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1603 if (QDF_IS_STATUS_ERROR(status))
1604 qdf_err("Failed to insert memory header; status %d", status);
1605
1606 qdf_mem_kmalloc_inc(ksize(header));
1607
1608 return ptr;
1609 }
1610
1611 qdf_export_symbol(qdf_mem_malloc_atomic_debug);
1612
1613 void *qdf_mem_malloc_atomic_debug_fl(size_t size, const char *func,
1614 uint32_t line)
1615 {
1616 void *ptr;
1617
1618 if (!size || size > QDF_MEM_MAX_MALLOC) {
1619 qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1620 line);
1621 return NULL;
1622 }
1623
1624 ptr = qdf_mem_prealloc_get(size);
1625 if (ptr)
1626 return ptr;
1627
1628 ptr = kzalloc(size, GFP_ATOMIC);
1629 if (!ptr) {
1630 qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1631 size, func, line);
1632 return NULL;
1633 }
1634
1635 qdf_mem_kmalloc_inc(ksize(ptr));
1636
1637 return ptr;
1638 }
1639
1640 qdf_export_symbol(qdf_mem_malloc_atomic_debug_fl);
1641
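/**
 * qdf_mem_free_debug() - free memory allocated by the debug allocators
 * @ptr: pointer returned by qdf_mem_malloc_debug() (NULL is a no-op)
 * @func: name of the calling function, used in error reports
 * @line: line number of the call site
 *
 * Validates the header, trailer and debug domain before freeing and
 * panics on corruption or double free.
 */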
1642 void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line)
1643 {
1644 enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1645 struct qdf_mem_header *header;
1646 enum qdf_mem_validation_bitmap error_bitmap;
1647
1648 if (is_initial_mem_debug_disabled) {
1649 __qdf_mem_free(ptr);
1650 return;
1651 }
1652
1653 /* freeing a null pointer is valid */
1654 if (qdf_unlikely(!ptr))
1655 return;
1656
1657 if (qdf_mem_prealloc_put(ptr))
1658 return;
1659
1660 if (qdf_unlikely((qdf_size_t)ptr <= sizeof(*header)))
1661 QDF_MEMDEBUG_PANIC("Failed to free invalid memory location %pK",
1662 ptr);
1663
1664 qdf_talloc_assert_no_children_fl(ptr, func, line);
1665
1666 qdf_spin_lock_irqsave(&qdf_mem_list_lock);
1667 header = qdf_mem_get_header(ptr);
1668 error_bitmap = qdf_mem_header_validate(header, current_domain);
1669 error_bitmap |= qdf_mem_trailer_validate(header);
1670
1671 if (!error_bitmap) {
1672 header->freed = true;
1673 qdf_list_remove_node(qdf_mem_list_get(header->domain),
1674 &header->node);
1675 }
1676 qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
1677
1678 qdf_mem_header_assert_valid(header, current_domain, error_bitmap,
1679 func, line);
1680
1681 qdf_mem_kmalloc_dec(ksize(header));
1682 kfree(header);
1683 }
1684 qdf_export_symbol(qdf_mem_free_debug);
1685
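/**
 * qdf_mem_check_for_leaks() - assert that the current debug domain is empty
 *
 * Prints any outstanding normal or dma allocations in the currently active
 * debug domain and panics if any are found.
 */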
1686 void qdf_mem_check_for_leaks(void)
1687 {
1688 enum qdf_debug_domain current_domain = qdf_debug_domain_get();
1689 qdf_list_t *mem_list = qdf_mem_list_get(current_domain);
1690 qdf_list_t *dma_list = qdf_mem_dma_list(current_domain);
1691 uint32_t leaks_count = 0;
1692
1693 if (is_initial_mem_debug_disabled)
1694 return;
1695
1696 leaks_count += qdf_mem_domain_check_for_leaks(current_domain, mem_list);
1697 leaks_count += qdf_mem_domain_check_for_leaks(current_domain, dma_list);
1698
1699 if (leaks_count)
1700 QDF_MEMDEBUG_PANIC("%u fatal memory leaks detected!",
1701 leaks_count);
1702 }
1703
1704 void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
1705 struct qdf_mem_multi_page_t *pages,
1706 size_t element_size, uint32_t element_num,
1707 qdf_dma_context_t memctxt, bool cacheable,
1708 const char *func, uint32_t line,
1709 void *caller)
1710 {
1711 uint16_t page_idx;
1712 struct qdf_mem_dma_page_t *dma_pages;
1713 void **cacheable_pages = NULL;
1714 uint16_t i;
1715
1716 if (!pages->page_size)
1717 pages->page_size = qdf_page_size;
1718
1719 pages->num_element_per_page = pages->page_size / element_size;
1720 if (!pages->num_element_per_page) {
1721 qdf_print("Invalid page %d or element size %d",
1722 (int)pages->page_size, (int)element_size);
1723 goto out_fail;
1724 }
1725
1726 pages->num_pages = element_num / pages->num_element_per_page;
1727 if (element_num % pages->num_element_per_page)
1728 pages->num_pages++;
1729
1730 if (cacheable) {
1731 /* Pages information storage */
1732 pages->cacheable_pages = qdf_mem_malloc_debug(
1733 pages->num_pages * sizeof(pages->cacheable_pages),
1734 func, line, caller, 0);
1735 if (!pages->cacheable_pages)
1736 goto out_fail;
1737
1738 cacheable_pages = pages->cacheable_pages;
1739 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1740 cacheable_pages[page_idx] = qdf_mem_malloc_debug(
1741 pages->page_size, func, line, caller, 0);
1742 if (!cacheable_pages[page_idx])
1743 goto page_alloc_fail;
1744 }
1745 pages->dma_pages = NULL;
1746 } else {
1747 pages->dma_pages = qdf_mem_malloc_debug(
1748 pages->num_pages * sizeof(struct qdf_mem_dma_page_t),
1749 func, line, caller, 0);
1750 if (!pages->dma_pages)
1751 goto out_fail;
1752
1753 dma_pages = pages->dma_pages;
1754 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1755 dma_pages->page_v_addr_start =
1756 qdf_mem_alloc_consistent_debug(
1757 osdev, osdev->dev, pages->page_size,
1758 &dma_pages->page_p_addr,
1759 func, line, caller);
1760 if (!dma_pages->page_v_addr_start) {
1761 qdf_print("dmaable page alloc fail pi %d",
1762 page_idx);
1763 goto page_alloc_fail;
1764 }
1765 dma_pages->page_v_addr_end =
1766 dma_pages->page_v_addr_start + pages->page_size;
1767 dma_pages++;
1768 }
1769 pages->cacheable_pages = NULL;
1770 }
1771 return;
1772
1773 page_alloc_fail:
1774 if (cacheable) {
1775 for (i = 0; i < page_idx; i++)
1776 qdf_mem_free_debug(pages->cacheable_pages[i],
1777 func, line);
1778 qdf_mem_free_debug(pages->cacheable_pages, func, line);
1779 } else {
1780 dma_pages = pages->dma_pages;
1781 for (i = 0; i < page_idx; i++) {
1782 qdf_mem_free_consistent_debug(
1783 osdev, osdev->dev,
1784 pages->page_size, dma_pages->page_v_addr_start,
1785 dma_pages->page_p_addr, memctxt, func, line);
1786 dma_pages++;
1787 }
1788 qdf_mem_free_debug(pages->dma_pages, func, line);
1789 }
1790
1791 out_fail:
1792 pages->cacheable_pages = NULL;
1793 pages->dma_pages = NULL;
1794 pages->num_pages = 0;
1795 }
1796
1797 qdf_export_symbol(qdf_mem_multi_pages_alloc_debug);
1798
1799 void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
1800 struct qdf_mem_multi_page_t *pages,
1801 qdf_dma_context_t memctxt, bool cacheable,
1802 const char *func, uint32_t line)
1803 {
1804 unsigned int page_idx;
1805 struct qdf_mem_dma_page_t *dma_pages;
1806
1807 if (!pages->page_size)
1808 pages->page_size = qdf_page_size;
1809
1810 if (cacheable) {
1811 for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
1812 qdf_mem_free_debug(pages->cacheable_pages[page_idx],
1813 func, line);
1814 qdf_mem_free_debug(pages->cacheable_pages, func, line);
1815 } else {
1816 dma_pages = pages->dma_pages;
1817 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1818 qdf_mem_free_consistent_debug(
1819 osdev, osdev->dev, pages->page_size,
1820 dma_pages->page_v_addr_start,
1821 dma_pages->page_p_addr, memctxt, func, line);
1822 dma_pages++;
1823 }
1824 qdf_mem_free_debug(pages->dma_pages, func, line);
1825 }
1826
1827 pages->cacheable_pages = NULL;
1828 pages->dma_pages = NULL;
1829 pages->num_pages = 0;
1830 }
1831
1832 qdf_export_symbol(qdf_mem_multi_pages_free_debug);
1833
1834 #else
static void qdf_mem_debug_init(void) {}
1836
static void qdf_mem_debug_exit(void) {}
1838
void *qdf_mem_malloc_atomic_fl(size_t size, const char *func, uint32_t line)
1840 {
1841 void *ptr;
1842
1843 if (!size || size > QDF_MEM_MAX_MALLOC) {
1844 qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
1845 line);
1846 return NULL;
1847 }
1848
1849 ptr = qdf_mem_prealloc_get(size);
1850 if (ptr)
1851 return ptr;
1852
1853 ptr = kzalloc(size, GFP_ATOMIC);
1854 if (!ptr) {
1855 qdf_nofl_warn("Failed to malloc %zuB @ %s:%d",
1856 size, func, line);
1857 return NULL;
1858 }
1859
1860 qdf_mem_kmalloc_inc(ksize(ptr));
1861
1862 return ptr;
1863 }
1864 qdf_export_symbol(qdf_mem_malloc_atomic_fl);
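
/*
 * Illustrative sketch of the atomic allocation path (hypothetical caller,
 * not part of this file). qdf_mem_malloc_atomic_fl() is normally reached
 * through a wrapper macro, but may be called directly as shown; the buffer
 * is returned zeroed and must be released with qdf_mem_free().
 *
 *	struct foo *ctx;
 *
 *	ctx = qdf_mem_malloc_atomic_fl(sizeof(*ctx), __func__, __LINE__);
 *	if (!ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(ctx);
 */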
1865
1866 #ifndef ALLOC_CONTIGUOUS_MULTI_PAGE
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1868 struct qdf_mem_multi_page_t *pages,
1869 size_t element_size, uint32_t element_num,
1870 qdf_dma_context_t memctxt, bool cacheable)
1871 {
1872 uint16_t page_idx;
1873 struct qdf_mem_dma_page_t *dma_pages;
1874 void **cacheable_pages = NULL;
1875 uint16_t i;
1876
1877 if (!pages->page_size)
1878 pages->page_size = qdf_page_size;
1879
1880 pages->num_element_per_page = pages->page_size / element_size;
1881 if (!pages->num_element_per_page) {
1882 qdf_print("Invalid page %d or element size %d",
1883 (int)pages->page_size, (int)element_size);
1884 goto out_fail;
1885 }
1886
1887 pages->num_pages = element_num / pages->num_element_per_page;
1888 if (element_num % pages->num_element_per_page)
1889 pages->num_pages++;
1890
1891 if (cacheable) {
1892 /* Pages information storage */
1893 pages->cacheable_pages = qdf_mem_malloc(
1894 pages->num_pages * sizeof(pages->cacheable_pages));
1895 if (!pages->cacheable_pages)
1896 goto out_fail;
1897
1898 cacheable_pages = pages->cacheable_pages;
1899 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1900 cacheable_pages[page_idx] =
1901 qdf_mem_malloc(pages->page_size);
1902 if (!cacheable_pages[page_idx])
1903 goto page_alloc_fail;
1904 }
1905 pages->dma_pages = NULL;
1906 } else {
1907 pages->dma_pages = qdf_mem_malloc(
1908 pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
1909 if (!pages->dma_pages)
1910 goto out_fail;
1911
1912 dma_pages = pages->dma_pages;
1913 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1914 dma_pages->page_v_addr_start =
1915 qdf_mem_alloc_consistent(osdev, osdev->dev,
1916 pages->page_size,
1917 &dma_pages->page_p_addr);
1918 if (!dma_pages->page_v_addr_start) {
1919 qdf_print("dmaable page alloc fail pi %d",
1920 page_idx);
1921 goto page_alloc_fail;
1922 }
1923 dma_pages->page_v_addr_end =
1924 dma_pages->page_v_addr_start + pages->page_size;
1925 dma_pages++;
1926 }
1927 pages->cacheable_pages = NULL;
1928 }
1929 return;
1930
1931 page_alloc_fail:
1932 if (cacheable) {
1933 for (i = 0; i < page_idx; i++)
1934 qdf_mem_free(pages->cacheable_pages[i]);
1935 qdf_mem_free(pages->cacheable_pages);
1936 } else {
1937 dma_pages = pages->dma_pages;
1938 for (i = 0; i < page_idx; i++) {
1939 qdf_mem_free_consistent(
1940 osdev, osdev->dev, pages->page_size,
1941 dma_pages->page_v_addr_start,
1942 dma_pages->page_p_addr, memctxt);
1943 dma_pages++;
1944 }
1945 qdf_mem_free(pages->dma_pages);
1946 }
1947
1948 out_fail:
1949 pages->cacheable_pages = NULL;
1950 pages->dma_pages = NULL;
1951 pages->num_pages = 0;
1952 return;
1953 }
1954 #else
void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
1956 struct qdf_mem_multi_page_t *pages,
1957 size_t element_size, uint32_t element_num,
1958 qdf_dma_context_t memctxt, bool cacheable)
1959 {
1960 uint16_t page_idx;
1961 struct qdf_mem_dma_page_t *dma_pages;
1962 void **cacheable_pages = NULL;
1963 uint16_t i;
1964 struct qdf_mem_dma_page_t temp_dma_pages;
1965 struct qdf_mem_dma_page_t *total_dma_pages = &temp_dma_pages;
1966 qdf_size_t total_size = 0;
1967
1968 pages->contiguous_dma_pages = false;
1969
1970 if (!pages->page_size)
1971 pages->page_size = qdf_page_size;
1972
1973 pages->num_element_per_page = pages->page_size / element_size;
1974 if (!pages->num_element_per_page) {
1975 qdf_print("Invalid page %d or element size %d",
1976 (int)pages->page_size, (int)element_size);
1977 goto out_fail;
1978 }
1979
1980 pages->num_pages = element_num / pages->num_element_per_page;
1981 if (element_num % pages->num_element_per_page)
1982 pages->num_pages++;
1983
1984 if (cacheable) {
1985 /* Pages information storage */
1986 pages->cacheable_pages = qdf_mem_malloc(
1987 pages->num_pages * sizeof(pages->cacheable_pages));
1988 if (!pages->cacheable_pages)
1989 goto out_fail;
1990
1991 cacheable_pages = pages->cacheable_pages;
1992 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
1993 cacheable_pages[page_idx] =
1994 qdf_mem_malloc(pages->page_size);
1995 if (!cacheable_pages[page_idx])
1996 goto page_alloc_fail;
1997 }
1998 pages->dma_pages = NULL;
1999 } else {
2000 pages->dma_pages = qdf_mem_malloc(
2001 pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
2002 if (!pages->dma_pages)
2003 goto out_fail;
2004
2005 dma_pages = pages->dma_pages;
2006 total_size = pages->page_size * pages->num_pages;
2007 total_dma_pages->page_v_addr_start =
2008 qdf_mem_alloc_consistent(osdev, osdev->dev,
2009 total_size,
2010 &total_dma_pages->page_p_addr);
2011 total_dma_pages->page_v_addr_end =
2012 total_dma_pages->page_v_addr_start + total_size;
2013 if (!total_dma_pages->page_v_addr_start) {
2014 qdf_print("mem allocate fail, total_size: %zu",
2015 total_size);
2016 goto page_alloc_default;
2017 }
2018
2019 pages->contiguous_dma_pages = true;
2020 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2021 dma_pages->page_v_addr_start =
2022 total_dma_pages->page_v_addr_start +
2023 (pages->page_size * page_idx);
2024 dma_pages->page_p_addr =
2025 total_dma_pages->page_p_addr +
2026 (pages->page_size * page_idx);
2027 dma_pages->page_v_addr_end =
2028 dma_pages->page_v_addr_start + pages->page_size;
2029 dma_pages++;
2030 }
2031 pages->cacheable_pages = NULL;
2032 return;
2033
2034 page_alloc_default:
2035 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2036 dma_pages->page_v_addr_start =
2037 qdf_mem_alloc_consistent(osdev, osdev->dev,
2038 pages->page_size,
2039 &dma_pages->page_p_addr);
2040 if (!dma_pages->page_v_addr_start) {
2041 qdf_print("dmaable page alloc fail pi %d",
2042 page_idx);
2043 goto page_alloc_fail;
2044 }
2045 dma_pages->page_v_addr_end =
2046 dma_pages->page_v_addr_start + pages->page_size;
2047 dma_pages++;
2048 }
2049 pages->cacheable_pages = NULL;
2050 }
2051 return;
2052
2053 page_alloc_fail:
2054 if (cacheable) {
2055 for (i = 0; i < page_idx; i++)
2056 qdf_mem_free(pages->cacheable_pages[i]);
2057 qdf_mem_free(pages->cacheable_pages);
2058 } else {
2059 dma_pages = pages->dma_pages;
2060 for (i = 0; i < page_idx; i++) {
2061 qdf_mem_free_consistent(
2062 osdev, osdev->dev, pages->page_size,
2063 dma_pages->page_v_addr_start,
2064 dma_pages->page_p_addr, memctxt);
2065 dma_pages++;
2066 }
2067 qdf_mem_free(pages->dma_pages);
2068 }
2069
2070 out_fail:
2071 pages->cacheable_pages = NULL;
2072 pages->dma_pages = NULL;
2073 pages->num_pages = 0;
2074 }
2075 #endif
2076 qdf_export_symbol(qdf_mem_multi_pages_alloc);
2077
2078 #ifndef ALLOC_CONTIGUOUS_MULTI_PAGE
void qdf_mem_multi_pages_free(qdf_device_t osdev,
2080 struct qdf_mem_multi_page_t *pages,
2081 qdf_dma_context_t memctxt, bool cacheable)
2082 {
2083 unsigned int page_idx;
2084 struct qdf_mem_dma_page_t *dma_pages;
2085
2086 if (!pages->page_size)
2087 pages->page_size = qdf_page_size;
2088
2089 if (cacheable) {
2090 for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2091 qdf_mem_free(pages->cacheable_pages[page_idx]);
2092 qdf_mem_free(pages->cacheable_pages);
2093 } else {
2094 dma_pages = pages->dma_pages;
2095 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2096 qdf_mem_free_consistent(
2097 osdev, osdev->dev, pages->page_size,
2098 dma_pages->page_v_addr_start,
2099 dma_pages->page_p_addr, memctxt);
2100 dma_pages++;
2101 }
2102 qdf_mem_free(pages->dma_pages);
2103 }
2104
2105 pages->cacheable_pages = NULL;
2106 pages->dma_pages = NULL;
2107 pages->num_pages = 0;
2108 return;
2109 }
2110 #else
void qdf_mem_multi_pages_free(qdf_device_t osdev,
2112 struct qdf_mem_multi_page_t *pages,
2113 qdf_dma_context_t memctxt, bool cacheable)
2114 {
2115 unsigned int page_idx;
2116 struct qdf_mem_dma_page_t *dma_pages;
2117 qdf_size_t total_size = 0;
2118
2119 if (!pages->page_size)
2120 pages->page_size = qdf_page_size;
2121
2122 if (cacheable) {
2123 for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2124 qdf_mem_free(pages->cacheable_pages[page_idx]);
2125 qdf_mem_free(pages->cacheable_pages);
2126 } else {
2127 dma_pages = pages->dma_pages;
2128 total_size = pages->page_size * pages->num_pages;
2129 if (pages->contiguous_dma_pages) {
2130 qdf_mem_free_consistent(
2131 osdev, osdev->dev, total_size,
2132 dma_pages->page_v_addr_start,
2133 dma_pages->page_p_addr, memctxt);
2134 goto pages_free_default;
2135 }
2136 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2137 qdf_mem_free_consistent(
2138 osdev, osdev->dev, pages->page_size,
2139 dma_pages->page_v_addr_start,
2140 dma_pages->page_p_addr, memctxt);
2141 dma_pages++;
2142 }
2143 pages_free_default:
2144 qdf_mem_free(pages->dma_pages);
2145 }
2146
2147 pages->cacheable_pages = NULL;
2148 pages->dma_pages = NULL;
2149 pages->num_pages = 0;
2150 }
2151 #endif
2152 qdf_export_symbol(qdf_mem_multi_pages_free);
2153 #endif
2154
void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
2156 bool cacheable)
2157 {
2158 unsigned int page_idx;
2159 struct qdf_mem_dma_page_t *dma_pages;
2160
2161 if (!pages->page_size)
2162 pages->page_size = qdf_page_size;
2163
2164 if (cacheable) {
2165 for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
2166 qdf_mem_zero(pages->cacheable_pages[page_idx],
2167 pages->page_size);
2168 } else {
2169 dma_pages = pages->dma_pages;
2170 for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
2171 qdf_mem_zero(dma_pages->page_v_addr_start,
2172 pages->page_size);
2173 dma_pages++;
2174 }
2175 }
2176 }
2177
2178 qdf_export_symbol(qdf_mem_multi_pages_zero);
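
/*
 * Illustrative sketch of a multi-page descriptor pool (hypothetical sizes
 * and names, not part of this file). Leaving page_size as 0 makes the
 * allocator fall back to qdf_page_size, and cacheable = true selects plain
 * kernel memory instead of DMA-coherent pages. On failure num_pages is 0.
 *
 *	struct qdf_mem_multi_page_t pages = { 0 };
 *	uint32_t num_desc = 1024;
 *
 *	qdf_mem_multi_pages_alloc(osdev, &pages, sizeof(struct my_desc),
 *				  num_desc, 0, true);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	qdf_mem_multi_pages_zero(&pages, true);
 *	...
 *	qdf_mem_multi_pages_free(osdev, &pages, 0, true);
 */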
2179
void __qdf_mem_free(void *ptr)
2181 {
2182 if (!ptr)
2183 return;
2184
2185 if (qdf_might_be_prealloc(ptr)) {
2186 if (qdf_mem_prealloc_put(ptr))
2187 return;
2188 }
2189
2190 qdf_mem_kmalloc_dec(ksize(ptr));
2191
2192 kfree(ptr);
2193 }
2194
2195 qdf_export_symbol(__qdf_mem_free);
2196
void *__qdf_mem_malloc(size_t size, const char *func, uint32_t line)
2198 {
2199 void *ptr;
2200
2201 if (!size || size > QDF_MEM_MAX_MALLOC) {
2202 qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2203 line);
2204 return NULL;
2205 }
2206
2207 ptr = qdf_mem_prealloc_get(size);
2208 if (ptr)
2209 return ptr;
2210
2211 ptr = kzalloc(size, qdf_mem_malloc_flags());
2212 if (!ptr)
2213 return NULL;
2214
2215 qdf_mem_kmalloc_inc(ksize(ptr));
2216
2217 return ptr;
2218 }
2219
2220 qdf_export_symbol(__qdf_mem_malloc);
2221
2222 #ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
void __qdf_untracked_mem_free(void *ptr)
2224 {
2225 if (!ptr)
2226 return;
2227
2228 kfree(ptr);
2229 }
2230
void *__qdf_untracked_mem_malloc(size_t size, const char *func, uint32_t line)
2232 {
2233 void *ptr;
2234
2235 if (!size || size > QDF_MEM_MAX_MALLOC) {
2236 qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d", size, func,
2237 line);
2238 return NULL;
2239 }
2240
2241 ptr = kzalloc(size, qdf_mem_malloc_flags());
2242 if (!ptr)
2243 return NULL;
2244
2245 return ptr;
2246 }
2247 #endif
2248
void *qdf_aligned_malloc_fl(uint32_t *size,
2250 void **vaddr_unaligned,
2251 qdf_dma_addr_t *paddr_unaligned,
2252 qdf_dma_addr_t *paddr_aligned,
2253 uint32_t align,
2254 const char *func, uint32_t line)
2255 {
2256 void *vaddr_aligned;
2257 uint32_t align_alloc_size;
2258
2259 *vaddr_unaligned = qdf_mem_malloc_fl((qdf_size_t)*size, func,
2260 line);
2261 if (!*vaddr_unaligned) {
2262 qdf_warn("Failed to alloc %uB @ %s:%d", *size, func, line);
2263 return NULL;
2264 }
2265
2266 *paddr_unaligned = qdf_mem_virt_to_phys(*vaddr_unaligned);
2267
	/* Re-allocate with additional bytes to align the base address only
	 * if the allocation above returns an unaligned address. The reason
	 * for trying an exact-size allocation first is that the OS allocates
	 * blocks of power-of-2 pages and then frees the extra pages.
	 * E.g., for a ring size of 1MB, the allocation below would request
	 * 1MB plus 7 bytes for alignment, causing a 2MB block allocation,
	 * and that sometimes fails due to memory fragmentation.
	 */
2277 if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2278 align_alloc_size = *size + align - 1;
2279
2280 qdf_mem_free(*vaddr_unaligned);
2281 *vaddr_unaligned = qdf_mem_malloc_fl(
2282 (qdf_size_t)align_alloc_size, func, line);
2283 if (!*vaddr_unaligned) {
2284 qdf_warn("Failed to alloc %uB @ %s:%d",
2285 align_alloc_size, func, line);
2286 return NULL;
2287 }
2288
2289 *paddr_unaligned = qdf_mem_virt_to_phys(
2290 *vaddr_unaligned);
2291 *size = align_alloc_size;
2292 }
2293
2294 *paddr_aligned = (qdf_dma_addr_t)qdf_align
2295 ((unsigned long)(*paddr_unaligned), align);
2296
2297 vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2298 ((unsigned long)(*paddr_aligned) -
2299 (unsigned long)(*paddr_unaligned)));
2300
2301 return vaddr_aligned;
2302 }
2303
2304 qdf_export_symbol(qdf_aligned_malloc_fl);
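
/*
 * Illustrative sketch of allocating an aligned ring (hypothetical values,
 * not part of this file). On success the aligned virtual address is
 * returned, *size may grow to cover the alignment padding, and the
 * original unaligned pointer is what must eventually be freed with
 * qdf_mem_free().
 *
 *	uint32_t ring_size = 4096;
 *	void *ring_vaddr_unaligned;
 *	qdf_dma_addr_t ring_paddr_unaligned, ring_paddr_aligned;
 *	void *ring_vaddr;
 *
 *	ring_vaddr = qdf_aligned_malloc_fl(&ring_size, &ring_vaddr_unaligned,
 *					   &ring_paddr_unaligned,
 *					   &ring_paddr_aligned, 8,
 *					   __func__, __LINE__);
 *	if (!ring_vaddr)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free(ring_vaddr_unaligned);
 */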
2305
2306 #if defined(DP_UMAC_HW_RESET_SUPPORT) || defined(WLAN_SUPPORT_PPEDS)
int qdf_tx_desc_pool_free_bufs(void *ctxt, struct qdf_mem_multi_page_t *pages,
2308 uint32_t elem_size, uint32_t elem_count,
2309 uint8_t cacheable, qdf_mem_release_cb cb,
2310 void *elem_list)
2311 {
2312 uint16_t i, i_int;
2313 void *page_info;
2314 void *elem;
2315 uint32_t num_elem = 0;
2316
2317 for (i = 0; i < pages->num_pages; i++) {
2318 if (cacheable)
2319 page_info = pages->cacheable_pages[i];
2320 else
2321 page_info = pages->dma_pages[i].page_v_addr_start;
2322
2323 if (!page_info)
2324 return -ENOMEM;
2325
2326 elem = page_info;
2327 for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2328 cb(ctxt, elem, elem_list);
2329 elem = ((char *)elem + elem_size);
2330 num_elem++;
2331
2332 /* Number of desc pool elements reached */
2333 if (num_elem == (elem_count - 1))
2334 break;
2335 }
2336 }
2337
2338 return 0;
2339 }
2340
2341 qdf_export_symbol(qdf_tx_desc_pool_free_bufs);
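
/*
 * Illustrative sketch of a release callback walked by
 * qdf_tx_desc_pool_free_bufs() (hypothetical pool types; the callback
 * parameter order is inferred from the cb(ctxt, elem, elem_list)
 * invocation above).
 *
 *	static void my_desc_release(void *ctxt, void *elem, void *elem_list)
 *	{
 *		struct my_desc *desc = elem;
 *
 *		// detach desc from elem_list / hand it back to the pool owner
 *	}
 *
 *	qdf_tx_desc_pool_free_bufs(pool_ctx, &pool->pages,
 *				   sizeof(struct my_desc), pool->num_desc,
 *				   true, my_desc_release, &pool->free_list);
 */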
2342 #endif
2343
int qdf_mem_multi_page_link(qdf_device_t osdev,
2345 struct qdf_mem_multi_page_t *pages,
2346 uint32_t elem_size, uint32_t elem_count,
2347 uint8_t cacheable)
2348 {
2349 uint16_t i, i_int;
2350 void *page_info;
2351 void **c_elem = NULL;
2352 uint32_t num_link = 0;
2353
2354 for (i = 0; i < pages->num_pages; i++) {
2355 if (cacheable)
2356 page_info = pages->cacheable_pages[i];
2357 else
2358 page_info = pages->dma_pages[i].page_v_addr_start;
2359
2360 if (!page_info)
2361 return -ENOMEM;
2362
2363 c_elem = (void **)page_info;
2364 for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
2365 if (i_int == (pages->num_element_per_page - 1)) {
2366 if ((i + 1) == pages->num_pages)
2367 break;
2368 if (cacheable)
2369 *c_elem = pages->
2370 cacheable_pages[i + 1];
2371 else
2372 *c_elem = pages->
2373 dma_pages[i + 1].
2374 page_v_addr_start;
2375 num_link++;
2376 break;
2377 } else {
2378 *c_elem =
2379 (void *)(((char *)c_elem) + elem_size);
2380 }
2381 num_link++;
2382 c_elem = (void **)*c_elem;
2383
			/* Last link established; exit */
2385 if (num_link == (elem_count - 1))
2386 break;
2387 }
2388 }
2389
2390 if (c_elem)
2391 *c_elem = NULL;
2392
2393 return 0;
2394 }
2395 qdf_export_symbol(qdf_mem_multi_page_link);
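
/*
 * Illustrative sketch of building and walking the element freelist
 * (hypothetical pool, not part of this file). After linking, the first
 * pointer-sized word of each element points to the next element and the
 * last element points to NULL, so the list can be popped as shown.
 *
 *	void *head, *next;
 *
 *	if (qdf_mem_multi_page_link(osdev, &pages, sizeof(struct my_desc),
 *				    num_desc, true))
 *		return QDF_STATUS_E_FAULT;
 *
 *	head = pages.cacheable_pages[0];
 *	while (head) {
 *		next = *(void **)head;	// follow the embedded link
 *		// hand 'head' out as a free descriptor
 *		head = next;
 *	}
 */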
2396
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2398 {
2399 /* special case where dst_addr or src_addr can be NULL */
2400 if (!num_bytes)
2401 return;
2402
2403 QDF_BUG(dst_addr);
2404 QDF_BUG(src_addr);
2405 if (!dst_addr || !src_addr)
2406 return;
2407
2408 memcpy(dst_addr, src_addr, num_bytes);
2409 }
2410 qdf_export_symbol(qdf_mem_copy);
2411
qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size)
2413 {
2414 qdf_shared_mem_t *shared_mem;
2415 qdf_dma_addr_t dma_addr, paddr;
2416 int ret;
2417
2418 shared_mem = qdf_mem_malloc(sizeof(*shared_mem));
2419 if (!shared_mem)
2420 return NULL;
2421
2422 shared_mem->vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
2423 size, qdf_mem_get_dma_addr_ptr(osdev,
2424 &shared_mem->mem_info));
2425 if (!shared_mem->vaddr) {
2426 qdf_err("Unable to allocate DMA memory for shared resource");
2427 qdf_mem_free(shared_mem);
2428 return NULL;
2429 }
2430
2431 qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
2432 size = qdf_mem_get_dma_size(osdev, &shared_mem->mem_info);
2433
2434 qdf_mem_zero(shared_mem->vaddr, size);
2435 dma_addr = qdf_mem_get_dma_addr(osdev, &shared_mem->mem_info);
2436 paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
2437
2438 qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
2439 ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
2440 shared_mem->vaddr, dma_addr, size);
2441 if (ret) {
2442 qdf_err("Unable to get DMA sgtable");
2443 qdf_mem_free_consistent(osdev, osdev->dev,
2444 shared_mem->mem_info.size,
2445 shared_mem->vaddr,
2446 dma_addr,
2447 qdf_get_dma_mem_context(shared_mem,
2448 memctx));
2449 qdf_mem_free(shared_mem);
2450 return NULL;
2451 }
2452
2453 qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
2454
2455 return shared_mem;
2456 }
2457
2458 qdf_export_symbol(qdf_mem_shared_mem_alloc);
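
/*
 * Illustrative sketch of consuming the shared memory handle (hypothetical
 * caller, not part of this file). The returned structure carries the
 * zeroed virtual address, the DMA metadata in mem_info and a scatter
 * table that can be handed to a remote subsystem.
 *
 *	qdf_shared_mem_t *shm;
 *
 *	shm = qdf_mem_shared_mem_alloc(osdev, PAGE_SIZE);
 *	if (!shm)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// shm->vaddr    : CPU-accessible, zero-initialized buffer
 *	// shm->mem_info : DMA address, size and physical address
 *	// shm->sgtable  : sg table describing the allocation
 */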
2459
void qdf_mem_copy_toio(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2461 {
2462 if (0 == num_bytes) {
2463 /* special case where dst_addr or src_addr can be NULL */
2464 return;
2465 }
2466
2467 if ((!dst_addr) || (!src_addr)) {
2468 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2469 "%s called with NULL parameter, source:%pK destination:%pK",
2470 __func__, src_addr, dst_addr);
2471 QDF_ASSERT(0);
2472 return;
2473 }
2474 memcpy_toio(dst_addr, src_addr, num_bytes);
2475 }
2476
2477 qdf_export_symbol(qdf_mem_copy_toio);
2478
void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value)
2480 {
2481 if (!ptr) {
2482 qdf_print("%s called with NULL parameter ptr", __func__);
2483 return;
2484 }
2485 memset_io(ptr, value, num_bytes);
2486 }
2487
2488 qdf_export_symbol(qdf_mem_set_io);
2489
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
2491 {
2492 QDF_BUG(ptr);
2493 if (!ptr)
2494 return;
2495
2496 memset(ptr, value, num_bytes);
2497 }
2498 qdf_export_symbol(qdf_mem_set);
2499
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
2501 {
2502 /* special case where dst_addr or src_addr can be NULL */
2503 if (!num_bytes)
2504 return;
2505
2506 QDF_BUG(dst_addr);
2507 QDF_BUG(src_addr);
2508 if (!dst_addr || !src_addr)
2509 return;
2510
2511 memmove(dst_addr, src_addr, num_bytes);
2512 }
2513 qdf_export_symbol(qdf_mem_move);
2514
int qdf_mem_cmp(const void *left, const void *right, size_t size)
2516 {
2517 QDF_BUG(left);
2518 QDF_BUG(right);
2519
2520 return memcmp(left, right, size);
2521 }
2522 qdf_export_symbol(qdf_mem_cmp);
2523
2524 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2525 /**
2526 * qdf_mem_dma_alloc() - allocates memory for dma
2527 * @osdev: OS device handle
2528 * @dev: Pointer to device handle
2529 * @size: Size to be allocated
2530 * @phy_addr: Physical address
2531 *
 * Return: pointer to the allocated memory, or NULL if the allocation fails
2533 */
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2535 qdf_size_t size,
2536 qdf_dma_addr_t *phy_addr)
2537 {
2538 void *vaddr;
2539
2540 vaddr = qdf_mem_malloc(size);
2541 *phy_addr = ((uintptr_t) vaddr);
2542 /* using this type conversion to suppress "cast from pointer to integer
2543 * of different size" warning on some platforms
2544 */
2545 BUILD_BUG_ON(sizeof(*phy_addr) < sizeof(vaddr));
2546 return vaddr;
2547 }
2548
2549 #elif defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
2550 !defined(QCA_WIFI_QCN9000)
2551
2552 #define QCA8074_RAM_BASE 0x50000000
2553 #define QDF_MEM_ALLOC_X86_MAX_RETRIES 10
void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev, qdf_size_t size,
2555 qdf_dma_addr_t *phy_addr)
2556 {
2557 void *vaddr = NULL;
2558 int i;
2559
2560 *phy_addr = 0;
2561
2562 for (i = 0; i < QDF_MEM_ALLOC_X86_MAX_RETRIES; i++) {
2563 vaddr = dma_alloc_coherent(dev, size, phy_addr,
2564 qdf_mem_malloc_flags());
2565
2566 if (!vaddr) {
2567 qdf_err("%s failed , size: %zu!", __func__, size);
2568 return NULL;
2569 }
2570
2571 if (*phy_addr >= QCA8074_RAM_BASE)
2572 return vaddr;
2573
2574 dma_free_coherent(dev, size, vaddr, *phy_addr);
2575 }
2576
2577 return NULL;
2578 }
2579 #elif defined(QCA_DMA_PADDR_CHECK)
2580 #ifdef CONFIG_LEAK_DETECTION
2581 #define MAX_DEBUG_DOMAIN_COUNT QDF_DEBUG_DOMAIN_COUNT
2582 #define debug_domain_get() qdf_debug_domain_get()
2583 #else
2584 #define MAX_DEBUG_DOMAIN_COUNT 1
2585 #define debug_domain_get() DEFAULT_DEBUG_DOMAIN_INIT
2586 #endif
2587 /**
2588 * struct qdf_dma_buf_entry - DMA invalid buffer list entry
2589 * @node: QDF list node member
2590 * @size: DMA buffer size
2591 * @phy_addr: DMA buffer physical address
 * @vaddr: DMA buffer virtual address. If the DMA buffer is at least as large
 *         as the entry, the entry info is stored inside the DMA buffer itself
 *         (the entry starts at the DMA buffer vaddr), which avoids extra
 *         memory consumption. If the DMA buffer is smaller than the entry,
 *         a separate entry buffer is allocated and vaddr is set to the
 *         invalid DMA buffer's virtual address.
2598 */
2599 struct qdf_dma_buf_entry {
2600 qdf_list_node_t node;
2601 qdf_size_t size;
2602 qdf_dma_addr_t phy_addr;
2603 void *vaddr;
2604 };
2605
2606 #define DMA_PHY_ADDR_RESERVED 0x2000
2607 #define QDF_DMA_MEM_ALLOC_MAX_RETRIES 10
2608 #define QDF_DMA_INVALID_BUF_LIST_SIZE 128
2609 static qdf_list_t qdf_invalid_buf_list[MAX_DEBUG_DOMAIN_COUNT];
2610 static bool qdf_invalid_buf_list_init[MAX_DEBUG_DOMAIN_COUNT];
2611 static qdf_spinlock_t qdf_invalid_buf_list_lock;
2612
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2614 qdf_size_t size, qdf_dma_addr_t *paddr)
2615 {
2616 void *vaddr;
2617 uint32_t retry;
2618 QDF_STATUS status;
2619 bool is_separate;
2620 qdf_list_t *cur_buf_list;
2621 struct qdf_dma_buf_entry *entry;
2622 uint8_t current_domain;
2623
2624 for (retry = 0; retry < QDF_DMA_MEM_ALLOC_MAX_RETRIES; retry++) {
2625 vaddr = dma_alloc_coherent(dev, size, paddr,
2626 qdf_mem_malloc_flags());
2627 if (!vaddr)
2628 return NULL;
2629
2630 if (qdf_likely(*paddr > DMA_PHY_ADDR_RESERVED))
2631 return vaddr;
2632
2633 current_domain = debug_domain_get();
2634
		/* If qdf_invalid_buf_list is not initialized, we cannot
		 * store the memory info or hold on to the buffer. Free the
		 * invalid memory and retry, hoping for a physical address
		 * above DMA_PHY_ADDR_RESERVED.
		 */
2640 if (current_domain >= MAX_DEBUG_DOMAIN_COUNT ||
2641 !qdf_invalid_buf_list_init[current_domain]) {
2642 qdf_debug("physical address below 0x%x, re-alloc",
2643 DMA_PHY_ADDR_RESERVED);
2644 dma_free_coherent(dev, size, vaddr, *paddr);
2645 continue;
2646 }
2647
2648 cur_buf_list = &qdf_invalid_buf_list[current_domain];
2649 if (size >= sizeof(*entry)) {
2650 entry = vaddr;
2651 entry->vaddr = NULL;
2652 } else {
2653 entry = qdf_mem_malloc(sizeof(*entry));
2654 if (!entry) {
2655 dma_free_coherent(dev, size, vaddr, *paddr);
2656 qdf_err("qdf_mem_malloc entry failed!");
2657 continue;
2658 }
2659 entry->vaddr = vaddr;
2660 }
2661
2662 entry->phy_addr = *paddr;
2663 entry->size = size;
2664 qdf_spin_lock_irqsave(&qdf_invalid_buf_list_lock);
2665 status = qdf_list_insert_back(cur_buf_list,
2666 &entry->node);
2667 qdf_spin_unlock_irqrestore(&qdf_invalid_buf_list_lock);
2668 if (QDF_IS_STATUS_ERROR(status)) {
2669 qdf_err("insert buf entry fail, status %d", status);
2670 is_separate = !entry->vaddr ? false : true;
2671 dma_free_coherent(dev, size, vaddr, *paddr);
2672 if (is_separate)
2673 qdf_mem_free(entry);
2674 }
2675 }
2676
2677 return NULL;
2678 }
2679 #else
static inline void *qdf_mem_dma_alloc(qdf_device_t osdev, void *dev,
2681 qdf_size_t size, qdf_dma_addr_t *paddr)
2682 {
2683 return dma_alloc_coherent(dev, size, paddr, qdf_mem_malloc_flags());
2684 }
2685 #endif
2686
2687 #if defined(A_SIMOS_DEVHOST) || defined(HIF_SDIO) || defined(HIF_USB)
2688 static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2690 {
2691 qdf_mem_free(vaddr);
2692 }
2693 #else
2694
2695 static inline void
qdf_mem_dma_free(void *dev, qdf_size_t size, void *vaddr, qdf_dma_addr_t paddr)
2697 {
2698 dma_free_coherent(dev, size, vaddr, paddr);
2699 }
2700 #endif
2701
2702 #ifdef MEMORY_DEBUG
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
2704 qdf_size_t size, qdf_dma_addr_t *paddr,
2705 const char *func, uint32_t line,
2706 void *caller)
2707 {
2708 QDF_STATUS status;
2709 enum qdf_debug_domain current_domain = qdf_debug_domain_get();
2710 qdf_list_t *mem_list = qdf_mem_dma_list(current_domain);
2711 struct qdf_mem_header *header;
2712 void *vaddr;
2713
2714 if (is_initial_mem_debug_disabled)
2715 return __qdf_mem_alloc_consistent(osdev, dev,
2716 size, paddr,
2717 func, line);
2718
2719 if (!size || size > QDF_MEM_MAX_MALLOC) {
2720 qdf_err("Cannot malloc %zu bytes @ %s:%d", size, func, line);
2721 return NULL;
2722 }
2723
2724 vaddr = qdf_mem_dma_alloc(osdev, dev, size + QDF_DMA_MEM_DEBUG_SIZE,
2725 paddr);
2726
2727 if (!vaddr) {
2728 qdf_warn("Failed to malloc %zuB @ %s:%d", size, func, line);
2729 return NULL;
2730 }
2731
2732 header = qdf_mem_dma_get_header(vaddr, size);
	/* For DMA buffers we only add trailers; this call initializes the
	 * header structure at the tail. Prepending a header to a DMA buffer
	 * causes SMMU faults, so the header is never placed at the front of
	 * DMA buffers.
	 */
2738 qdf_mem_header_init(header, size, func, line, caller);
2739
2740 qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
2741 status = qdf_list_insert_front(mem_list, &header->node);
2742 qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2743 if (QDF_IS_STATUS_ERROR(status))
2744 qdf_err("Failed to insert memory header; status %d", status);
2745
2746 qdf_mem_dma_inc(size);
2747
2748 return vaddr;
2749 }
2750 qdf_export_symbol(qdf_mem_alloc_consistent_debug);
2751
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
2753 qdf_size_t size, void *vaddr,
2754 qdf_dma_addr_t paddr,
2755 qdf_dma_context_t memctx,
2756 const char *func, uint32_t line)
2757 {
2758 enum qdf_debug_domain domain = qdf_debug_domain_get();
2759 struct qdf_mem_header *header;
2760 enum qdf_mem_validation_bitmap error_bitmap;
2761
2762 if (is_initial_mem_debug_disabled) {
2763 __qdf_mem_free_consistent(
2764 osdev, dev,
2765 size, vaddr,
2766 paddr, memctx);
2767 return;
2768 }
2769
2770 /* freeing a null pointer is valid */
2771 if (qdf_unlikely(!vaddr))
2772 return;
2773
2774 qdf_talloc_assert_no_children_fl(vaddr, func, line);
2775
2776 qdf_spin_lock_irqsave(&qdf_mem_dma_list_lock);
	/* For DMA buffers we only add trailers; this call retrieves the
	 * header structure from the tail. Prepending a header to a DMA
	 * buffer causes SMMU faults, so the header is never placed at the
	 * front of DMA buffers.
	 */
2782 header = qdf_mem_dma_get_header(vaddr, size);
2783 error_bitmap = qdf_mem_header_validate(header, domain);
2784 if (!error_bitmap) {
2785 header->freed = true;
2786 qdf_list_remove_node(qdf_mem_dma_list(header->domain),
2787 &header->node);
2788 }
2789 qdf_spin_unlock_irqrestore(&qdf_mem_dma_list_lock);
2790
2791 qdf_mem_header_assert_valid(header, domain, error_bitmap, func, line);
2792
2793 qdf_mem_dma_dec(header->size);
2794 qdf_mem_dma_free(dev, size + QDF_DMA_MEM_DEBUG_SIZE, vaddr, paddr);
2795 }
2796 qdf_export_symbol(qdf_mem_free_consistent_debug);
2797 #endif /* MEMORY_DEBUG */
2798
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
2800 qdf_size_t size, void *vaddr,
2801 qdf_dma_addr_t paddr, qdf_dma_context_t memctx)
2802 {
2803 qdf_mem_dma_dec(size);
2804 qdf_mem_dma_free(dev, size, vaddr, paddr);
2805 }
2806
2807 qdf_export_symbol(__qdf_mem_free_consistent);
2808
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
2810 qdf_size_t size, qdf_dma_addr_t *paddr,
2811 const char *func, uint32_t line)
2812 {
2813 void *vaddr;
2814
2815 if (!size || size > QDF_MEM_MAX_MALLOC) {
2816 qdf_nofl_err("Cannot malloc %zu bytes @ %s:%d",
2817 size, func, line);
2818 return NULL;
2819 }
2820
2821 vaddr = qdf_mem_dma_alloc(osdev, dev, size, paddr);
2822
2823 if (vaddr)
2824 qdf_mem_dma_inc(size);
2825
2826 return vaddr;
2827 }
2828
2829 qdf_export_symbol(__qdf_mem_alloc_consistent);
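
/*
 * Illustrative sketch of a DMA-coherent allocation through the public
 * wrappers (hypothetical ring; the size is arbitrary and the caller is
 * not part of this file). The same size, virtual address and DMA address
 * must be passed back when freeing.
 *
 *	void *ring;
 *	qdf_dma_addr_t ring_paddr;
 *
 *	ring = qdf_mem_alloc_consistent(osdev, osdev->dev, 4096, &ring_paddr);
 *	if (!ring)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	qdf_mem_free_consistent(osdev, osdev->dev, 4096, ring, ring_paddr, 0);
 */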
2830
void *qdf_aligned_mem_alloc_consistent_fl(
2832 qdf_device_t osdev, uint32_t *size,
2833 void **vaddr_unaligned, qdf_dma_addr_t *paddr_unaligned,
2834 qdf_dma_addr_t *paddr_aligned, uint32_t align,
2835 const char *func, uint32_t line)
2836 {
2837 void *vaddr_aligned;
2838 uint32_t align_alloc_size;
2839
2840 *vaddr_unaligned = qdf_mem_alloc_consistent(
2841 osdev, osdev->dev, (qdf_size_t)*size, paddr_unaligned);
2842 if (!*vaddr_unaligned) {
2843 qdf_warn("Failed to alloc %uB @ %s:%d",
2844 *size, func, line);
2845 return NULL;
2846 }
2847
	/* Re-allocate with additional bytes to align the base address only
	 * if the allocation above returns an unaligned address. The reason
	 * for trying an exact-size allocation first is that the OS allocates
	 * blocks of power-of-2 pages and then frees the extra pages.
	 * E.g., for a ring size of 1MB, the allocation below would request
	 * 1MB plus 7 bytes for alignment, causing a 2MB block allocation,
	 * and that sometimes fails due to memory fragmentation.
	 */
2857 if ((unsigned long)(*paddr_unaligned) & (align - 1)) {
2858 align_alloc_size = *size + align - 1;
2859
2860 qdf_mem_free_consistent(osdev, osdev->dev, *size,
2861 *vaddr_unaligned,
2862 *paddr_unaligned, 0);
2863
2864 *vaddr_unaligned = qdf_mem_alloc_consistent(
2865 osdev, osdev->dev, align_alloc_size,
2866 paddr_unaligned);
2867 if (!*vaddr_unaligned) {
2868 qdf_warn("Failed to alloc %uB @ %s:%d",
2869 align_alloc_size, func, line);
2870 return NULL;
2871 }
2872
2873 *size = align_alloc_size;
2874 }
2875
2876 *paddr_aligned = (qdf_dma_addr_t)qdf_align(
2877 (unsigned long)(*paddr_unaligned), align);
2878
2879 vaddr_aligned = (void *)((unsigned long)(*vaddr_unaligned) +
2880 ((unsigned long)(*paddr_aligned) -
2881 (unsigned long)(*paddr_unaligned)));
2882
2883 return vaddr_aligned;
2884 }
2885 qdf_export_symbol(qdf_aligned_mem_alloc_consistent_fl);
2886
void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
2888 qdf_dma_addr_t bus_addr,
2889 qdf_size_t size,
2890 enum dma_data_direction direction)
2891 {
2892 dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
2893 }
2894 qdf_export_symbol(qdf_mem_dma_sync_single_for_device);
2895
void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
2897 qdf_dma_addr_t bus_addr,
2898 qdf_size_t size,
2899 enum dma_data_direction direction)
2900 {
2901 dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
2902 }
2903 qdf_export_symbol(qdf_mem_dma_sync_single_for_cpu);
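
/*
 * Illustrative sketch of the cache-maintenance pairing (hypothetical
 * buffer, not part of this file): sync to the device before it reads a
 * CPU-written buffer, and sync back to the CPU before parsing data the
 * device has written.
 *
 *	qdf_mem_dma_sync_single_for_device(osdev, buf_paddr, buf_len,
 *					   DMA_TO_DEVICE);
 *	// ... device consumes the buffer ...
 *	qdf_mem_dma_sync_single_for_cpu(osdev, buf_paddr, buf_len,
 *					DMA_FROM_DEVICE);
 */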
2904
void qdf_mem_init(void)
2906 {
2907 qdf_mem_debug_init();
2908 qdf_net_buf_debug_init();
2909 qdf_frag_debug_init();
2910 qdf_mem_debugfs_init();
2911 qdf_mem_debug_debugfs_init();
2912 }
2913 qdf_export_symbol(qdf_mem_init);
2914
void qdf_mem_exit(void)
2916 {
2917 qdf_mem_debug_debugfs_exit();
2918 qdf_mem_debugfs_exit();
2919 qdf_frag_debug_exit();
2920 qdf_net_buf_debug_exit();
2921 qdf_mem_debug_exit();
2922 }
2923 qdf_export_symbol(qdf_mem_exit);
2924
void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
2926 {
2927 if ((!dst_addr) || (!src_addr)) {
2928 QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
2929 "%s called with NULL parameter, source:%pK destination:%pK",
2930 __func__, src_addr, dst_addr);
2931 QDF_ASSERT(0);
2932 return;
2933 }
2934 ether_addr_copy(dst_addr, src_addr);
2935 }
2936 qdf_export_symbol(qdf_ether_addr_copy);
2937
int32_t qdf_dma_mem_stats_read(void)
2939 {
2940 return qdf_atomic_read(&qdf_mem_stat.dma);
2941 }
2942
2943 qdf_export_symbol(qdf_dma_mem_stats_read);
2944
int32_t qdf_heap_mem_stats_read(void)
2946 {
2947 return qdf_atomic_read(&qdf_mem_stat.kmalloc);
2948 }
2949
2950 qdf_export_symbol(qdf_heap_mem_stats_read);
2951
int32_t qdf_skb_mem_stats_read(void)
2953 {
2954 return qdf_atomic_read(&qdf_mem_stat.skb);
2955 }
2956
2957 qdf_export_symbol(qdf_skb_mem_stats_read);
2958
int32_t qdf_skb_total_mem_stats_read(void)
2960 {
2961 return qdf_atomic_read(&qdf_mem_stat.skb_total);
2962 }
2963
2964 qdf_export_symbol(qdf_skb_total_mem_stats_read);
2965
int32_t qdf_skb_max_mem_stats_read(void)
2967 {
2968 return qdf_mem_stat.skb_mem_max;
2969 }
2970
2971 qdf_export_symbol(qdf_skb_max_mem_stats_read);
2972
int32_t qdf_dp_tx_skb_mem_stats_read(void)
2974 {
2975 return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb);
2976 }
2977
2978 qdf_export_symbol(qdf_dp_tx_skb_mem_stats_read);
2979
int32_t qdf_dp_rx_skb_mem_stats_read(void)
2981 {
2982 return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb);
2983 }
2984
2985 qdf_export_symbol(qdf_dp_rx_skb_mem_stats_read);
2986
int32_t qdf_mem_dp_tx_skb_cnt_read(void)
2988 {
2989 return qdf_atomic_read(&qdf_mem_stat.dp_tx_skb_count);
2990 }
2991
2992 qdf_export_symbol(qdf_mem_dp_tx_skb_cnt_read);
2993
int32_t qdf_mem_dp_tx_skb_max_cnt_read(void)
2995 {
2996 return qdf_mem_stat.dp_tx_skb_count_max;
2997 }
2998
2999 qdf_export_symbol(qdf_mem_dp_tx_skb_max_cnt_read);
3000
int32_t qdf_mem_dp_rx_skb_cnt_read(void)
3002 {
3003 return qdf_atomic_read(&qdf_mem_stat.dp_rx_skb_count);
3004 }
3005
3006 qdf_export_symbol(qdf_mem_dp_rx_skb_cnt_read);
3007
int32_t qdf_mem_dp_rx_skb_max_cnt_read(void)
3009 {
3010 return qdf_mem_stat.dp_rx_skb_count_max;
3011 }
3012
3013 qdf_export_symbol(qdf_mem_dp_rx_skb_max_cnt_read);
3014
int32_t qdf_dp_tx_skb_max_mem_stats_read(void)
3016 {
3017 return qdf_mem_stat.dp_tx_skb_mem_max;
3018 }
3019
3020 qdf_export_symbol(qdf_dp_tx_skb_max_mem_stats_read);
3021
int32_t qdf_dp_rx_skb_max_mem_stats_read(void)
3023 {
3024 return qdf_mem_stat.dp_rx_skb_mem_max;
3025 }
3026
3027 qdf_export_symbol(qdf_dp_rx_skb_max_mem_stats_read);
3028
int32_t qdf_mem_tx_desc_cnt_read(void)
3030 {
3031 return qdf_atomic_read(&qdf_mem_stat.tx_descs_outstanding);
3032 }
3033
3034 qdf_export_symbol(qdf_mem_tx_desc_cnt_read);
3035
int32_t qdf_mem_tx_desc_max_read(void)
3037 {
3038 return qdf_mem_stat.tx_descs_max;
3039 }
3040
3041 qdf_export_symbol(qdf_mem_tx_desc_max_read);
3042
void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
3044 int32_t tx_descs_max)
3045 {
3046 qdf_mem_stat.tx_descs_outstanding = pending_tx_descs;
3047 qdf_mem_stat.tx_descs_max = tx_descs_max;
3048 }
3049
3050 qdf_export_symbol(qdf_mem_tx_desc_cnt_update);
3051
void qdf_mem_stats_init(void)
3053 {
3054 qdf_mem_stat.skb_mem_max = 0;
3055 qdf_mem_stat.dp_tx_skb_mem_max = 0;
3056 qdf_mem_stat.dp_rx_skb_mem_max = 0;
3057 qdf_mem_stat.dp_tx_skb_count_max = 0;
3058 qdf_mem_stat.dp_rx_skb_count_max = 0;
3059 qdf_mem_stat.tx_descs_max = 0;
3060 }
3061
3062 qdf_export_symbol(qdf_mem_stats_init);
3063
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line)
3065 {
3066 void *ptr;
3067
3068 if (!size) {
3069 qdf_err("Valloc called with 0 bytes @ %s:%d", func, line);
3070 return NULL;
3071 }
3072
3073 ptr = vzalloc(size);
3074
3075 return ptr;
3076 }
3077
3078 qdf_export_symbol(__qdf_mem_valloc);
3079
void __qdf_mem_vfree(void *ptr)
3081 {
3082 if (qdf_unlikely(!ptr))
3083 return;
3084
3085 vfree(ptr);
3086 }
3087
3088 qdf_export_symbol(__qdf_mem_vfree);
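
/*
 * Illustrative sketch of a large virtually-contiguous allocation
 * (hypothetical table and count, not part of this file). These helpers are
 * normally reached through wrapper macros; vzalloc-backed memory suits big
 * tables that need not be physically contiguous, and must be released with
 * the matching vfree wrapper.
 *
 *	struct my_entry *tbl;
 *
 *	tbl = __qdf_mem_valloc(sizeof(*tbl) * 2048, __func__, __LINE__);
 *	if (!tbl)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	__qdf_mem_vfree(tbl);
 */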
3089
3090 #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
3091 int
qdf_iommu_domain_get_attr(qdf_iommu_domain_t *domain,
3093 enum qdf_iommu_attr attr, void *data)
3094 {
3095 return __qdf_iommu_domain_get_attr(domain, attr, data);
3096 }
3097
3098 qdf_export_symbol(qdf_iommu_domain_get_attr);
3099 #endif
3100
3101 #ifdef ENHANCED_OS_ABSTRACTION
void qdf_update_mem_map_table(qdf_device_t osdev,
3103 qdf_mem_info_t *mem_info,
3104 qdf_dma_addr_t dma_addr,
3105 uint32_t mem_size)
3106 {
3107 if (!mem_info) {
3108 qdf_nofl_err("%s: NULL mem_info", __func__);
3109 return;
3110 }
3111
3112 __qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
3113 }
3114
3115 qdf_export_symbol(qdf_update_mem_map_table);
3116
qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
3118 qdf_dma_addr_t dma_addr)
3119 {
3120 return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
3121 }
3122
3123 qdf_export_symbol(qdf_mem_paddr_from_dmaaddr);
3124 #endif
3125
3126 #ifdef QCA_KMEM_CACHE_SUPPORT
3127 qdf_kmem_cache_t
__qdf_kmem_cache_create(const char *cache_name,
3129 qdf_size_t size)
3130 {
3131 struct kmem_cache *cache;
3132
3133 cache = kmem_cache_create(cache_name, size,
3134 0, 0, NULL);
3135
3136 if (!cache)
3137 return NULL;
3138
3139 return cache;
3140 }
3141 qdf_export_symbol(__qdf_kmem_cache_create);
3142
3143 void
__qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3145 {
3146 kmem_cache_destroy(cache);
3147 }
3148
3149 qdf_export_symbol(__qdf_kmem_cache_destroy);
3150
3151 void*
__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3153 {
3154 int flags = GFP_KERNEL;
3155
3156 if (in_interrupt() || irqs_disabled() || in_atomic())
3157 flags = GFP_ATOMIC;
3158
3159 return kmem_cache_alloc(cache, flags);
3160 }
3161
3162 qdf_export_symbol(__qdf_kmem_cache_alloc);
3163
3164 void
__qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3166
3167 {
3168 kmem_cache_free(cache, node);
3169 }
3170
3171 qdf_export_symbol(__qdf_kmem_cache_free);
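
/*
 * Illustrative sketch of the slab-cache helpers (hypothetical object type,
 * not part of this file). A cache amortizes allocator overhead for
 * fixed-size objects that are allocated and freed frequently.
 *
 *	qdf_kmem_cache_t cache;
 *	struct my_node *node;
 *
 *	cache = __qdf_kmem_cache_create("my_node_cache",
 *					sizeof(struct my_node));
 *	if (!cache)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	node = __qdf_kmem_cache_alloc(cache);
 *	...
 *	__qdf_kmem_cache_free(cache, node);
 *	__qdf_kmem_cache_destroy(cache);
 */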
3172 #else
3173 qdf_kmem_cache_t
__qdf_kmem_cache_create(const char *cache_name,
3175 qdf_size_t size)
3176 {
3177 return NULL;
3178 }
3179
3180 void
__qdf_kmem_cache_destroy(qdf_kmem_cache_t cache)
3182 {
3183 }
3184
3185 void *
__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache)
3187 {
3188 return NULL;
3189 }
3190
3191 void
__qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node)
3193 {
3194 }
3195 #endif
3196
3197 #ifdef QCA_DMA_PADDR_CHECK
void qdf_dma_invalid_buf_list_init(void)
3199 {
3200 int i;
3201
3202 for (i = 0; i < MAX_DEBUG_DOMAIN_COUNT; i++) {
3203 qdf_list_create(&qdf_invalid_buf_list[i],
3204 QDF_DMA_INVALID_BUF_LIST_SIZE);
3205 qdf_invalid_buf_list_init[i] = true;
3206 }
3207 qdf_spinlock_create(&qdf_invalid_buf_list_lock);
3208 }
3209
void qdf_dma_invalid_buf_free(void *dev, uint8_t domain)
3211 {
3212 bool is_separate;
3213 qdf_list_t *cur_buf_list;
3214 struct qdf_dma_buf_entry *entry;
3215 QDF_STATUS status = QDF_STATUS_E_EMPTY;
3216
3217 if (!dev)
3218 return;
3219
3220 if (domain >= MAX_DEBUG_DOMAIN_COUNT)
3221 return;
3222
3223 if (!qdf_invalid_buf_list_init[domain])
3224 return;
3225
3226 cur_buf_list = &qdf_invalid_buf_list[domain];
3227 do {
3228 qdf_spin_lock_irqsave(&qdf_invalid_buf_list_lock);
3229 status = qdf_list_remove_front(cur_buf_list,
3230 (qdf_list_node_t **)&entry);
3231 qdf_spin_unlock_irqrestore(&qdf_invalid_buf_list_lock);
3232
3233 if (status != QDF_STATUS_SUCCESS)
3234 break;
3235
3236 is_separate = !entry->vaddr ? false : true;
3237 if (is_separate) {
3238 dma_free_coherent(dev, entry->size, entry->vaddr,
3239 entry->phy_addr);
3240 qdf_mem_free(entry);
3241 } else
3242 dma_free_coherent(dev, entry->size, entry,
3243 entry->phy_addr);
3244 } while (!qdf_list_empty(cur_buf_list));
3245 qdf_invalid_buf_list_init[domain] = false;
3246 }
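
/*
 * Illustrative lifecycle sketch for the invalid-DMA-buffer tracking
 * (hypothetical call sites, not part of this file): initialize the lists
 * once at driver load, release any quarantined buffers for a debug domain
 * when that domain's device context is torn down, and destroy the lists
 * at driver unload.
 *
 *	qdf_dma_invalid_buf_list_init();
 *	...
 *	qdf_dma_invalid_buf_free(osdev->dev, current_domain);
 *	...
 *	qdf_dma_invalid_buf_list_deinit();
 */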
3247
void qdf_dma_invalid_buf_list_deinit(void)
3249 {
3250 int i;
3251
3252 for (i = 0; i < MAX_DEBUG_DOMAIN_COUNT; i++)
3253 qdf_list_destroy(&qdf_invalid_buf_list[i]);
3254
3255 qdf_spinlock_destroy(&qdf_invalid_buf_list_lock);
3256 }
3257 #endif /* QCA_DMA_PADDR_CHECK */
3258