// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/version.h>
#include "cnss_common.h"
#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "cnss_prealloc.h"
#else
#include <net/cnss_prealloc.h>
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
/* Ideally this header would come from the standard include path, so this is
 * not an ideal way to include it. However, using struct slab to derive the
 * cache from a mem ptr avoids additional tracking and/or an 8-byte headroom
 * for the cache pointer at the beginning of each buffer (sketched in the
 * comment below), which would waste memory, particularly when the requested
 * size falls near the edge of a page boundary. We also have the precedent of
 * minidump_memory.c, which includes mm/slab.h in this style.
 */
#include "../mm/slab.h"
#endif
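
/* For contrast, a minimal sketch of the headroom scheme this avoids; the
 * struct and helper below are hypothetical, not part of this driver:
 *
 *	struct hdr { struct kmem_cache *cache; };	// 8 bytes on 64-bit
 *
 *	static void *hdr_alloc(struct kmem_cache *cache, gfp_t gfp)
 *	{
 *		struct hdr *h = kmem_cache_alloc(cache, gfp);
 *
 *		if (!h)
 *			return NULL;
 *		h->cache = cache;	// track the owner inside the buffer
 *		return h + 1;		// caller sees memory past the header
 *	}
 *
 * Every pool object would then need size + 8 bytes, so a request sized
 * exactly at a pool boundary would spill into the next larger pool.
 */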

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CNSS prealloc driver");

/* The cnss preallocation scheme is a set of memory pools that always tries
 * to keep a list of free buffers for use in emergencies. It is built on two
 * kernel features, mempool and kmem_cache, layered as sketched below.
 */
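
/* In kernel API terms, the layering per pool is (sketch only; the real calls
 * are made in cnss_pool_init() below):
 *
 *	cache = kmem_cache_create_usercopy(name, size, ...);
 *	pool = mempool_create(min, mempool_alloc_slab, mempool_free_slab,
 *			      cache);
 *
 * mempool_alloc() first tries a regular allocation from the cache and dips
 * into the preallocated reserve of @min elements only when that fails.
 */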

struct cnss_pool {
	size_t size;
	int min;
	const char name[50];
	mempool_t *mp;
	struct kmem_cache *cache;
};

/**
 * Memory pool
 * -----------
 *
 * How to update this table:
 *
 * 1. Add a new row with the following elements (see the example row after
 *    this comment):
 *    size  : Size of one allocation unit in bytes.
 *    min   : Minimum number of units to be reserved. Used only if a regular
 *            allocation fails.
 *    name  : Name of the cache/pool. Displayed in /proc/slabinfo if not
 *            merged with another pool.
 *    mp    : Pointer to the memory pool. Updated during init.
 *    cache : Pointer to the cache. Updated during init.
 * 2. Always keep the table sorted by increasing size.
 * 3. Keep the reserve pool as small as possible, as it is always
 *    preallocated.
 * 4. Always profile with different use cases after updating this table.
 * 5. A dynamic view of the pools is available at /proc/slabinfo.
 * 6. Each pool has a sysfs node at /sys/kernel/slab/<name>.
 */
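
/* For example, a hypothetical 256k pool (not present in this driver) would
 * be appended as a new last row, keeping the size order:
 *
 *	{256 * 1024, 4, "cnss-pool-256k", NULL, NULL},
 */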

/* size, min pool reserve, name, mempool handler, cache handler */
static struct cnss_pool cnss_pools_default[] = {
	{8 * 1024, 16, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 22, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 38, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 10, "cnss-pool-128k", NULL, NULL},
};

static struct cnss_pool cnss_pools_adrastea[] = {
	{8 * 1024, 2, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 10, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 8, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 4, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 2, "cnss-pool-128k", NULL, NULL},
};

static struct cnss_pool cnss_pools_wcn6750[] = {
	{8 * 1024, 2, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 8, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 11, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 15, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 4, "cnss-pool-128k", NULL, NULL},
};

struct cnss_pool *cnss_pools;
unsigned int cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_default);

/**
 * cnss_pool_alloc_threshold() - Allocation threshold
 *
 * Minimum memory size to be part of a cnss pool.
 *
 * Return: Threshold size in bytes
 */
static inline size_t cnss_pool_alloc_threshold(void)
{
	return cnss_pools[0].size;
}

/**
 * cnss_pool_init() - Initialize memory pools.
 *
 * Create cnss pools as configured by cnss_pools[]. It is the responsibility
 * of the caller to invoke cnss_pool_deinit() to clean up. This function needs
 * to be called early at boot to preallocate the minimum buffers in the pool.
 *
 * Return: 0 - success, otherwise error code.
 */
static int cnss_pool_init(void)
{
	int i;

	for (i = 0; i < cnss_prealloc_pool_size; i++) {
		/* Create the slab cache */
		cnss_pools[i].cache =
			kmem_cache_create_usercopy(cnss_pools[i].name,
						   cnss_pools[i].size, 0,
						   SLAB_ACCOUNT, 0,
						   cnss_pools[i].size, NULL);
		if (!cnss_pools[i].cache) {
			pr_err("cnss_prealloc: cache %s failed\n",
			       cnss_pools[i].name);
			continue;
		}

		/* Create the pool and associate it with the slab cache */
		cnss_pools[i].mp =
			mempool_create(cnss_pools[i].min, mempool_alloc_slab,
				       mempool_free_slab, cnss_pools[i].cache);

		if (!cnss_pools[i].mp) {
			pr_err("cnss_prealloc: mempool %s failed\n",
			       cnss_pools[i].name);
			kmem_cache_destroy(cnss_pools[i].cache);
			cnss_pools[i].cache = NULL;
			continue;
		}

		pr_info("cnss_prealloc: created mempool %s of min size %d * %zu\n",
			cnss_pools[i].name, cnss_pools[i].min,
			cnss_pools[i].size);
	}

	return 0;
}

/**
 * cnss_pool_deinit() - Free memory pools.
 *
 * Free the memory pools and return resources back to the system. It warns
 * if there is any pending element in a memory pool or cache.
 */
static void cnss_pool_deinit(void)
{
	int i;

	if (!cnss_pools)
		return;

	for (i = 0; i < cnss_prealloc_pool_size; i++) {
		pr_info("cnss_prealloc: destroy mempool %s\n",
			cnss_pools[i].name);
		mempool_destroy(cnss_pools[i].mp);
		kmem_cache_destroy(cnss_pools[i].cache);
		cnss_pools[i].mp = NULL;
		cnss_pools[i].cache = NULL;
	}
}

void cnss_assign_prealloc_pool(unsigned long device_id)
{
	pr_info("cnss_prealloc: assign cnss pool for device id 0x%lx\n",
		device_id);

	switch (device_id) {
	case ADRASTEA_DEVICE_ID:
		cnss_pools = cnss_pools_adrastea;
		cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_adrastea);
		break;
	case WCN6750_DEVICE_ID:
		cnss_pools = cnss_pools_wcn6750;
		cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_wcn6750);
		break;
	case WCN6450_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
	case KIWI_DEVICE_ID:
	default:
		cnss_pools = cnss_pools_default;
		cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_default);
	}
}

void cnss_initialize_prealloc_pool(unsigned long device_id)
{
	cnss_assign_prealloc_pool(device_id);
	cnss_pool_init();
}
EXPORT_SYMBOL(cnss_initialize_prealloc_pool);

void cnss_deinitialize_prealloc_pool(void)
{
	cnss_pool_deinit();
}
EXPORT_SYMBOL(cnss_deinitialize_prealloc_pool);
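
/* Typical call sequence for the two exports above (sketch; the calling
 * driver and the exact probe/remove hooks are assumptions, not defined
 * here):
 *
 *	cnss_initialize_prealloc_pool(device_id);	// early, at probe
 *	...
 *	cnss_deinitialize_prealloc_pool();		// at remove/shutdown
 */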

/**
 * cnss_pool_get_index() - Get the index of a memory pool
 * @mem: Allocated memory
 *
 * Returns the index of the memory pool which fits the requested memory. The
 * complexity of this check is O(number of memory pools). Returns a negative
 * error code in case of failure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
static int cnss_pool_get_index(void *mem)
{
	struct slab *slab;
	struct kmem_cache *cache;
	int i;

	if (!virt_addr_valid(mem))
		return -EINVAL;

	/* mem -> slab -> cache */
	slab = virt_to_slab(mem);
	if (!slab)
		return -ENOENT;

	cache = slab->slab_cache;
	if (!cache)
		return -ENOENT;

	/* Check if the memory belongs to a pool */
	for (i = 0; i < cnss_prealloc_pool_size; i++) {
		if (cnss_pools[i].cache == cache)
			return i;
	}

	return -ENOENT;
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
static int cnss_pool_get_index(void *mem)
{
	struct page *page;
	struct kmem_cache *cache;
	int i;

	if (!virt_addr_valid(mem))
		return -EINVAL;

	/* mem -> page -> cache */
	page = virt_to_head_page(mem);
	if (!page)
		return -ENOENT;

	cache = page->slab_cache;
	if (!cache)
		return -ENOENT;

	/* Check if the memory belongs to a pool */
	for (i = 0; i < cnss_prealloc_pool_size; i++) {
		if (cnss_pools[i].cache == cache)
			return i;
	}

	return -ENOENT;
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */

/**
 * wcnss_prealloc_get() - Get preallocated memory from a pool
 * @size: Size to allocate
 *
 * The memory pool is chosen based on the size. If memory is not available in
 * a given pool, the next higher-sized pool is tried until one succeeds.
 *
 * Return: Pointer to the allocated, zeroed memory, or NULL on failure.
 */
void *wcnss_prealloc_get(size_t size)
{
	void *mem = NULL;
	gfp_t gfp_mask = __GFP_ZERO;
	int i;

	if (!cnss_pools)
		return mem;

	/* Use GFP_ATOMIC in any context where sleeping is not allowed */
	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
		gfp_mask |= GFP_ATOMIC;
	else
		gfp_mask |= GFP_KERNEL;

	if (size >= cnss_pool_alloc_threshold()) {
		for (i = 0; i < cnss_prealloc_pool_size; i++) {
			if (cnss_pools[i].size >= size && cnss_pools[i].mp) {
				mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
				if (mem)
					break;
			}
		}
	}

	if (!mem && size >= cnss_pool_alloc_threshold()) {
		pr_debug("cnss_prealloc: not available for size %zu, flag %x\n",
			 size, gfp_mask);
	}

	return mem;
}
EXPORT_SYMBOL(wcnss_prealloc_get);

/**
 * wcnss_prealloc_put() - Release allocated memory
 * @mem: Allocated memory
 *
 * Free memory obtained via wcnss_prealloc_get() back to the slab, or to the
 * pool reserve if the memory pool doesn't have enough elements.
 *
 * Return: 1 - success
 *         0 - fail
 */
int wcnss_prealloc_put(void *mem)
{
	int i;

	if (!mem || !cnss_pools)
		return 0;

	i = cnss_pool_get_index(mem);
	if (i >= 0 && i < cnss_prealloc_pool_size && cnss_pools[i].mp) {
		mempool_free(mem, cnss_pools[i].mp);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(wcnss_prealloc_put);
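
/* A minimal usage sketch for the get/put pair above, assuming a caller that
 * falls back to the regular allocator (the fallback policy belongs to the
 * caller, not this driver):
 *
 *	void *buf = wcnss_prealloc_get(24 * 1024);	// served by 32k pool
 *
 *	if (!buf)
 *		buf = kmalloc(24 * 1024, GFP_KERNEL);	// hypothetical fallback
 *	...
 *	if (!wcnss_prealloc_put(buf))	// returns 0 for non-pool memory
 *		kfree(buf);
 */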

/* Not implemented. Make use of Linux SLAB features. */
void wcnss_prealloc_check_memory_leak(void) {}
EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);

/* Not implemented. Make use of Linux SLAB features. */
int wcnss_pre_alloc_reset(void) { return -EOPNOTSUPP; }
EXPORT_SYMBOL(wcnss_pre_alloc_reset);

/**
 * cnss_prealloc_is_valid_dt_node_found() - Check if a valid device tree node
 * is present
 *
 * A valid device tree node means a node with the "qcom,wlan" property present
 * and the "status" property not disabled.
 *
 * Return: true if a valid device tree node is found, false otherwise
 */
static bool cnss_prealloc_is_valid_dt_node_found(void)
{
	struct device_node *dn = NULL;

	for_each_node_with_property(dn, "qcom,wlan") {
		if (of_device_is_available(dn))
			break;
	}

	if (dn)
		return true;

	return false;
}
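
/* Illustratively, a device tree node that passes the check above would look
 * roughly like this (hypothetical fragment; the compatible string and unit
 * address are examples, not requirements of this driver):
 *
 *	wlan@b0000000 {
 *		compatible = "qcom,wcn6750-wifi";
 *		qcom,wlan;
 *		status = "okay";
 *	};
 */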

static int __init cnss_prealloc_init(void)
{
	if (!cnss_prealloc_is_valid_dt_node_found())
		return -ENODEV;

	return 0;
}

static void __exit cnss_prealloc_exit(void)
{
}

module_init(cnss_prealloc_init);
module_exit(cnss_prealloc_exit);