/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include <wlan_objmgr_pdev_obj.h>
#include <wlan_dp_main.h>
#include <wlan_dp_priv.h>
#include <wlan_dp_prealloc.h>
#include <dp_types.h>
#include <dp_internal.h>
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_misc.h>
#include <dp_tx_desc.h>
#include <dp_rx.h>
#include <ce_api.h>
#include <ce_internal.h>
#include <wlan_cfg.h>
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef WLAN_PKT_CAPTURE_TX_2_0
#include "mon_ingress_ring.h"
#include "mon_destination_ring.h"
#include "dp_mon_2.0.h"
#endif

#ifdef DP_MEM_PRE_ALLOC

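/*
 * With DP_MEM_PRE_ALLOC enabled, large DP allocations (SRNG ring memory,
 * multi-page descriptor pools, CE ring memory and DP context structures)
 * are reserved once in dp_prealloc_init() and then handed out/returned via
 * the dp_prealloc_get_*()/dp_prealloc_put_*() accessors below, so that a
 * later soc (re)initialization does not depend on large contiguous
 * allocations succeeding at runtime.
 */
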
/* Max entries in FISA Flow table */
#define FISA_RX_FT_SIZE 256

/* Num elements in REO ring */
#define REO_DST_RING_SIZE 1024

/* Num elements in TCL Data ring */
#define TCL_DATA_RING_SIZE 5120

/* Num elements in WBM2SW ring */
#define WBM2SW_RELEASE_RING_SIZE 8192

/* Num elements in WBM Idle Link ring */
#define WBM_IDLE_LINK_RING_SIZE (32 * 1024)

/* Num TX desc in TX desc pool */
#define DP_TX_DESC_POOL_SIZE 6144

#define DP_TX_RX_DESC_MAX_NUM \
	(WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS + \
	 WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS)

/**
 * struct dp_consistent_prealloc - element representing DP pre-alloc memory
 * @ring_type: HAL ring type
 * @size: size of pre-alloc memory
 * @in_use: whether this element is in use (occupied)
 * @va_unaligned: unaligned virtual address
 * @va_aligned: aligned virtual address
 * @pa_unaligned: unaligned physical address
 * @pa_aligned: aligned physical address
 */
struct dp_consistent_prealloc {
	enum hal_ring_type ring_type;
	uint32_t size;
	uint8_t in_use;
	void *va_unaligned;
	void *va_aligned;
	qdf_dma_addr_t pa_unaligned;
	qdf_dma_addr_t pa_aligned;
};

/**
 * struct dp_multi_page_prealloc - element representing DP pre-alloc multiple
 * pages memory
 * @desc_type: source descriptor type for memory allocation
 * @element_size: single element size
 * @element_num: total number of elements to be allocated
 * @in_use: whether this element is in use (occupied)
 * @cacheable: coherent memory or cacheable memory
 * @pages: multi page information storage
 */
struct dp_multi_page_prealloc {
	enum qdf_dp_desc_type desc_type;
	qdf_size_t element_size;
	uint16_t element_num;
	bool in_use;
	bool cacheable;
	struct qdf_mem_multi_page_t pages;
};

/**
 * struct dp_consistent_prealloc_unaligned - element representing DP pre-alloc
 * unaligned memory
 * @ring_type: HAL ring type
 * @size: size of pre-alloc memory
 * @in_use: whether this element is in use (occupied)
 * @va_unaligned: unaligned virtual address
 * @pa_unaligned: unaligned physical address
 */
struct dp_consistent_prealloc_unaligned {
	enum hal_ring_type ring_type;
	uint32_t size;
	bool in_use;
	void *va_unaligned;
	qdf_dma_addr_t pa_unaligned;
};

/**
 * struct dp_prealloc_context - element representing DP prealloc context memory
 * @ctxt_type: DP context type
 * @size: size of pre-alloc memory
 * @in_use: whether this element is in use (occupied)
 * @is_critical: failure to pre-allocate a critical element causes
 *	dp_prealloc_init() to fail
 * @addr: address of memory allocated
 */
struct dp_prealloc_context {
	enum dp_ctxt_type ctxt_type;
	uint32_t size;
	bool in_use;
	bool is_critical;
	void *addr;
};

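/*
 * Pre-allocated DP context memory, handed out by context type (and size)
 * from dp_prealloc_get_context_memory(). Entries marked critical cause
 * dp_prealloc_init() to fail when they cannot be allocated.
 */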
static struct dp_prealloc_context g_dp_context_allocs[] = {
	{DP_PDEV_TYPE, (sizeof(struct dp_pdev)), false, true, NULL},
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
	/* 4 Rx ring history */
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
#ifdef CONFIG_BERYLLIUM
	/* 4 extra Rx ring history */
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
	{DP_RX_RING_HIST_TYPE, sizeof(struct dp_rx_history), false, false,
	 NULL},
#endif /* CONFIG_BERYLLIUM */
	/* 1 Rx error ring history */
	{DP_RX_ERR_RING_HIST_TYPE, sizeof(struct dp_rx_err_history),
	 false, false, NULL},
#ifndef RX_DEFRAG_DO_NOT_REINJECT
	/* 1 Rx reinject ring history */
	{DP_RX_REINJECT_RING_HIST_TYPE, sizeof(struct dp_rx_reinject_history),
	 false, false, NULL},
#endif /* RX_DEFRAG_DO_NOT_REINJECT */
	/* 1 Rx refill ring history */
	{DP_RX_REFILL_RING_HIST_TYPE, sizeof(struct dp_rx_refill_history),
	 false, false, NULL},
#endif /* WLAN_FEATURE_DP_RX_RING_HISTORY */
#ifdef DP_TX_HW_DESC_HISTORY
	{DP_TX_HW_DESC_HIST_TYPE,
	 DP_TX_HW_DESC_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_hw_desc_evt),
	 false, false, NULL},
	{DP_TX_HW_DESC_HIST_TYPE,
	 DP_TX_HW_DESC_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_hw_desc_evt),
	 false, false, NULL},
	{DP_TX_HW_DESC_HIST_TYPE,
	 DP_TX_HW_DESC_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_hw_desc_evt),
	 false, false, NULL},
#endif
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_TCL_HIST_TYPE,
	 DP_TX_TCL_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},

	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},
	{DP_TX_COMP_HIST_TYPE,
	 DP_TX_COMP_HIST_PER_SLOT_MAX * sizeof(struct dp_tx_desc_event),
	 false, false, NULL},

#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
#ifdef WLAN_SUPPORT_RX_FISA
	{DP_FISA_RX_FT_TYPE, sizeof(struct dp_fisa_rx_sw_ft) * FISA_RX_FT_SIZE,
	 false, true, NULL},
#endif
#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
	{DP_MON_STATUS_BUF_HIST_TYPE, sizeof(struct dp_mon_status_ring_history),
	 false, false, NULL},
#endif
#ifdef WIFI_MONITOR_SUPPORT
	{DP_MON_PDEV_TYPE, sizeof(struct dp_mon_pdev),
	 false, false, NULL},
#endif
#ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
	{DP_CFG_EVENT_HIST_TYPE,
	 DP_CFG_EVT_HIST_PER_SLOT_MAX * sizeof(struct dp_cfg_event),
	 false, false, NULL},
#endif
#ifdef WLAN_PKT_CAPTURE_TX_2_0
	{DP_MON_TX_DESC_POOL_TYPE, 0, false, false, NULL},
#endif
};

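/*
 * Pre-allocated DMA-consistent SRNG memory. Entries with size 0 are sized
 * at dp_prealloc_init() time via dp_update_mem_size_by_ring_type() from the
 * INI-driven prealloc configuration.
 */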
static struct dp_consistent_prealloc g_dp_consistent_allocs[] = {
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
#ifdef CONFIG_BERYLLIUM
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0,
	 NULL, NULL, 0, 0},
#endif
	/* 3 TCL data rings */
	{TCL_DATA, 0, 0, NULL, NULL, 0, 0},
	{TCL_DATA, 0, 0, NULL, NULL, 0, 0},
	{TCL_DATA, 0, 0, NULL, NULL, 0, 0},
	/* 4 WBM2SW rings */
	{WBM2SW_RELEASE, 0, 0, NULL, NULL, 0, 0},
	{WBM2SW_RELEASE, 0, 0, NULL, NULL, 0, 0},
	{WBM2SW_RELEASE, 0, 0, NULL, NULL, 0, 0},
	{WBM2SW_RELEASE, 0, 0, NULL, NULL, 0, 0},
	/* 1 SW2WBM link descriptor return ring */
	{SW2WBM_RELEASE, 0, 0, NULL, NULL, 0, 0},
	/* 1 WBM idle link desc ring */
	{WBM_IDLE_LINK, (sizeof(struct wbm_link_descriptor_ring)) *
	 WBM_IDLE_LINK_RING_SIZE, 0, NULL, NULL, 0, 0},
	/* 2 RXDMA DST ERR rings */
	{RXDMA_DST, 0, 0, NULL, NULL, 0, 0},
	{RXDMA_DST, 0, 0, NULL, NULL, 0, 0},
	/* REFILL ring 0 */
	{RXDMA_BUF, 0, 0, NULL, NULL, 0, 0},
	/* 2 RXDMA buffer rings */
	{RXDMA_BUF, 0, 0, NULL, NULL, 0, 0},
	{RXDMA_BUF, 0, 0, NULL, NULL, 0, 0},
	/* 1 REO exception ring */
	{REO_EXCEPTION, 0, 0, NULL, NULL, 0, 0},
	/* 1 REO status ring */
	{REO_STATUS, 0, 0, NULL, NULL, 0, 0},
	/* 2 monitor status rings */
	{RXDMA_MONITOR_STATUS, 0, 0, NULL, NULL, 0, 0},
	{RXDMA_MONITOR_STATUS, 0, 0, NULL, NULL, 0, 0},
#ifdef WLAN_PKT_CAPTURE_TX_2_0
	/* 2 MON2SW Tx monitor rings */
	{TX_MONITOR_DST, 0, 0, NULL, NULL, 0, 0},
	{TX_MONITOR_DST, 0, 0, NULL, NULL, 0, 0},
#endif
};

/* Number of HW link descriptors needed (rounded to power of 2) */
#define NUM_HW_LINK_DESCS (32 * 1024)

/* Size in bytes of HW LINK DESC */
#define HW_LINK_DESC_SIZE 128

/* Size in bytes of TX Desc (rounded to power of 2) */
#define TX_DESC_SIZE 128

/* Size in bytes of TX TSO Desc (rounded to power of 2) */
#define TX_TSO_DESC_SIZE 256

/* Size in bytes of TX TSO Num Seg Desc (rounded to power of 2) */
#define TX_TSO_NUM_SEG_DESC_SIZE 16

#define NON_CACHEABLE 0
#define CACHEABLE 1

#define DIRECT_LINK_CE_RX_BUF_SIZE 256
#define DIRECT_LINK_DEFAULT_BUF_SZ 2048
#define TX_DIRECT_LINK_BUF_NUM 380
#define TX_DIRECT_LINK_CE_BUF_NUM 8
#define RX_DIRECT_LINK_CE_BUF_NUM 30

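/*
 * Pre-allocated multi-page descriptor pools. Element counts of 0 are filled
 * in at dp_prealloc_init() time via dp_update_num_elements_by_desc_type().
 */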
static struct dp_multi_page_prealloc g_dp_multi_page_allocs[] = {
	/* 4 TX DESC pools */
	{QDF_DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },

	/* 4 Tx EXT DESC NON Cacheable pools */
	{QDF_DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
	 NON_CACHEABLE, { 0 } },
	{QDF_DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
	 NON_CACHEABLE, { 0 } },
	{QDF_DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
	 NON_CACHEABLE, { 0 } },
	{QDF_DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
	 NON_CACHEABLE, { 0 } },

	/* 4 Tx EXT DESC Link Cacheable pools */
	{QDF_DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0,
	 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0,
	 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0,
	 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0,
	 0, CACHEABLE, { 0 } },

	/* 4 TX TSO DESC pools */
	{QDF_DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
	{QDF_DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },

	/* 4 TX TSO NUM SEG DESC pools */
	{QDF_DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
	 CACHEABLE, { 0 } },
	{QDF_DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
	 CACHEABLE, { 0 } },
	{QDF_DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
	 CACHEABLE, { 0 } },
	{QDF_DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
	 CACHEABLE, { 0 } },

	/* DP RX DESCs BUF pools */
	{QDF_DP_RX_DESC_BUF_TYPE, sizeof(union dp_rx_desc_list_elem_t),
	 0, 0, CACHEABLE, { 0 } },

#ifndef DISABLE_MON_CONFIG
	/* 2 DP RX DESCs Status pools */
	{QDF_DP_RX_DESC_STATUS_TYPE, sizeof(union dp_rx_desc_list_elem_t),
	 WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE + 1, 0, CACHEABLE, { 0 } },
	{QDF_DP_RX_DESC_STATUS_TYPE, sizeof(union dp_rx_desc_list_elem_t),
	 WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE + 1, 0, CACHEABLE, { 0 } },
#endif /* !DISABLE_MON_CONFIG */
	/* DP HW Link DESCs pools */
	{QDF_DP_HW_LINK_DESC_TYPE, HW_LINK_DESC_SIZE, NUM_HW_LINK_DESCS, 0,
	 NON_CACHEABLE, { 0 } },
#ifdef CONFIG_BERYLLIUM
	{QDF_DP_HW_CC_SPT_PAGE_TYPE, qdf_page_size,
	 ((DP_TX_RX_DESC_MAX_NUM * sizeof(uint64_t)) / qdf_page_size),
	 0, NON_CACHEABLE, { 0 } },
#endif
#ifdef FEATURE_DIRECT_LINK
	{QDF_DP_TX_DIRECT_LINK_CE_BUF_TYPE, DIRECT_LINK_DEFAULT_BUF_SZ,
	 TX_DIRECT_LINK_CE_BUF_NUM, 0, NON_CACHEABLE, { 0 } },
	{QDF_DP_TX_DIRECT_LINK_BUF_TYPE, DIRECT_LINK_DEFAULT_BUF_SZ,
	 TX_DIRECT_LINK_BUF_NUM, 0, NON_CACHEABLE, { 0 } },
	{QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE, DIRECT_LINK_CE_RX_BUF_SIZE,
	 RX_DIRECT_LINK_CE_BUF_NUM, 0, NON_CACHEABLE, { 0 } },
#endif
};

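/*
 * Pre-allocated unaligned DMA-consistent memory for copy engine (CE) rings;
 * the per-CE entry counts below are expected to match the host CE ring
 * configuration for this target.
 */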
static struct dp_consistent_prealloc_unaligned
	g_dp_consistent_unaligned_allocs[] = {
	/* CE-0 */
	{CE_SRC, (sizeof(struct ce_srng_src_desc) * 16 + CE_DESC_RING_ALIGN),
	 false, NULL, 0},
	/* CE-1 */
	{CE_DST, (sizeof(struct ce_srng_dest_desc) * 512 + CE_DESC_RING_ALIGN),
	 false, NULL, 0},
	{CE_DST_STATUS, (sizeof(struct ce_srng_dest_status_desc) * 512
	 + CE_DESC_RING_ALIGN), false, NULL, 0},
	/* CE-2 */
	{CE_DST, (sizeof(struct ce_srng_dest_desc) * 32 + CE_DESC_RING_ALIGN),
	 false, NULL, 0},
	{CE_DST_STATUS, (sizeof(struct ce_srng_dest_status_desc) * 32
	 + CE_DESC_RING_ALIGN), false, NULL, 0},
	/* CE-3 */
	{CE_SRC, (sizeof(struct ce_srng_src_desc) * 32 + CE_DESC_RING_ALIGN),
	 false, NULL, 0},
	/* CE-4 */
	{CE_SRC, (sizeof(struct ce_srng_src_desc) * 256 + CE_DESC_RING_ALIGN),
	 false, NULL, 0},
	/* CE-5 */
	{CE_DST, (sizeof(struct ce_srng_dest_desc) * 512 + CE_DESC_RING_ALIGN),
	 false, NULL, 0},
	{CE_DST_STATUS, (sizeof(struct ce_srng_dest_status_desc) * 512
	 + CE_DESC_RING_ALIGN), false, NULL, 0},
};

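/**
 * dp_prealloc_deinit() - free all DP pre-allocated memory
 *
 * Warns if any element is still marked in use, then releases the consistent,
 * multi-page, unaligned and context pre-allocations back to the OS.
 */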
void dp_prealloc_deinit(void)
{
	int i;
	struct dp_prealloc_context *cp;
	struct dp_consistent_prealloc *p;
	struct dp_multi_page_prealloc *mp;
	struct dp_consistent_prealloc_unaligned *up;
	qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);

	if (!qdf_ctx)
		return;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_allocs); i++) {
		p = &g_dp_consistent_allocs[i];

		if (p->in_use)
			dp_warn("i %d: consistent_mem in use while free", i);

		if (p->va_aligned) {
			dp_debug("i %d: va aligned %pK pa aligned %pK size %d",
				 i, p->va_aligned, (void *)p->pa_aligned,
				 p->size);
			qdf_mem_free_consistent(qdf_ctx, qdf_ctx->dev,
						p->size,
						p->va_unaligned,
						p->pa_unaligned, 0);
			p->in_use = false;
			p->va_unaligned = NULL;
			p->va_aligned = NULL;
			p->pa_unaligned = 0;
			p->pa_aligned = 0;
		}
	}

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
		mp = &g_dp_multi_page_allocs[i];

		if (mp->in_use)
			dp_warn("i %d: multi-page mem in use while free", i);

		if (mp->pages.num_pages) {
			dp_info("i %d: type %d cacheable_pages %pK dma_pages %pK num_pages %d",
				i, mp->desc_type,
				mp->pages.cacheable_pages,
				mp->pages.dma_pages,
				mp->pages.num_pages);
			qdf_mem_multi_pages_free(qdf_ctx, &mp->pages,
						 0, mp->cacheable);
			mp->in_use = false;
			qdf_mem_zero(&mp->pages, sizeof(mp->pages));
		}
	}

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_unaligned_allocs); i++) {
		up = &g_dp_consistent_unaligned_allocs[i];

		if (qdf_unlikely(up->in_use))
			dp_info("i %d: unaligned mem in use while free", i);

		if (up->va_unaligned) {
			dp_info("i %d: va unalign %pK pa unalign %pK size %d",
				i, up->va_unaligned,
				(void *)up->pa_unaligned, up->size);
			qdf_mem_free_consistent(qdf_ctx, qdf_ctx->dev,
						up->size,
						up->va_unaligned,
						up->pa_unaligned, 0);
			up->in_use = false;
			up->va_unaligned = NULL;
			up->pa_unaligned = 0;
		}
	}

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_context_allocs); i++) {
		cp = &g_dp_context_allocs[i];
		if (qdf_unlikely(cp->in_use))
			dp_warn("i %d: context in use while free", i);

		if (cp->addr) {
			qdf_mem_free(cp->addr);
			cp->addr = NULL;
		}
	}
}

#ifdef CONFIG_BERYLLIUM
/**
 * dp_get_tcl_data_srng_entrysize() - Get the TCL data srng entry size
 *
 * Return: TCL data srng entry size
 */
static inline uint32_t dp_get_tcl_data_srng_entrysize(void)
{
	return sizeof(struct tcl_data_cmd);
}

#ifdef WLAN_PKT_CAPTURE_TX_2_0
/**
 * dp_get_tx_mon_mem_size() - Get tx mon ring memory size
 * @cfg: prealloc config
 * @ring_type: ring type
 *
 * Return: Tx mon ring memory size
 */
static inline
uint32_t dp_get_tx_mon_mem_size(struct wlan_dp_prealloc_cfg *cfg,
				enum hal_ring_type ring_type)
{
	uint32_t mem_size = 0;

	if (!cfg)
		return mem_size;

	if (ring_type == TX_MONITOR_BUF) {
		mem_size = (sizeof(struct mon_ingress_ring)) *
			   cfg->num_tx_mon_buf_ring_entries;
	} else if (ring_type == TX_MONITOR_DST) {
		mem_size = (sizeof(struct mon_destination_ring)) *
			   cfg->num_tx_mon_dst_ring_entries;
	}

	return mem_size;
}

/**
 * dp_get_tx_mon_desc_pool_mem_size() - Get tx mon desc pool memory size
 * @cfg: prealloc config
 *
 * Return: Tx mon desc pool memory size
 */
static inline
uint32_t dp_get_tx_mon_desc_pool_mem_size(struct wlan_dp_prealloc_cfg *cfg)
{
	return (sizeof(union dp_mon_desc_list_elem_t)) *
	       cfg->num_tx_mon_buf_ring_entries;
}
#else
static inline
uint32_t dp_get_tx_mon_mem_size(struct wlan_dp_prealloc_cfg *cfg,
				enum hal_ring_type ring_type)
{
	return 0;
}

static inline
uint32_t dp_get_tx_mon_desc_pool_mem_size(struct wlan_dp_prealloc_cfg *cfg)
{
	return 0;
}
#endif /* WLAN_PKT_CAPTURE_TX_2_0 */
#else
static inline uint32_t dp_get_tcl_data_srng_entrysize(void)
{
	return (sizeof(struct tlv_32_hdr) + sizeof(struct tcl_data_cmd));
}

static inline
uint32_t dp_get_tx_mon_mem_size(struct wlan_dp_prealloc_cfg *cfg,
				enum hal_ring_type ring_type)
{
	return 0;
}

static inline
uint32_t dp_get_tx_mon_desc_pool_mem_size(struct wlan_dp_prealloc_cfg *cfg)
{
	return 0;
}
#endif /* CONFIG_BERYLLIUM */

/**
 * dp_update_mem_size_by_ctx_type() - Update dp context memory size
 * based on context type
 * @cfg: prealloc related cfg params
 * @ctx_type: DP context type
 * @mem_size: memory size to be updated
 *
 * Return: none
 */
static void
dp_update_mem_size_by_ctx_type(struct wlan_dp_prealloc_cfg *cfg,
			       enum dp_ctxt_type ctx_type,
			       uint32_t *mem_size)
{
	switch (ctx_type) {
	case DP_MON_TX_DESC_POOL_TYPE:
		*mem_size = dp_get_tx_mon_desc_pool_mem_size(cfg);
		break;
	default:
		return;
	}
}

/**
 * dp_update_mem_size_by_ring_type() - Update srng memory size based
 * on ring type and the corresponding ini configuration
 * @cfg: prealloc related cfg params
 * @ring_type: srng type
 * @mem_size: memory size to be updated
 *
 * Return: None
 */
static void
dp_update_mem_size_by_ring_type(struct wlan_dp_prealloc_cfg *cfg,
				enum hal_ring_type ring_type,
				uint32_t *mem_size)
{
	switch (ring_type) {
	case TCL_DATA:
		*mem_size = dp_get_tcl_data_srng_entrysize() *
			    cfg->num_tx_ring_entries;
		return;
	case WBM2SW_RELEASE:
		*mem_size = (sizeof(struct wbm_release_ring)) *
			    cfg->num_tx_comp_ring_entries;
		return;
	case SW2WBM_RELEASE:
		*mem_size = (sizeof(struct wbm_release_ring)) *
			    cfg->num_wbm_rel_ring_entries;
		return;
	case RXDMA_DST:
		*mem_size = (sizeof(struct reo_entrance_ring)) *
			    cfg->num_rxdma_err_dst_ring_entries;
		return;
	case REO_EXCEPTION:
		*mem_size = (sizeof(struct reo_destination_ring)) *
			    cfg->num_reo_exception_ring_entries;
		return;
	case REO_DST:
		*mem_size = (sizeof(struct reo_destination_ring)) *
			    cfg->num_reo_dst_ring_entries;
		return;
	case RXDMA_BUF:
		*mem_size = (sizeof(struct wbm_buffer_ring)) *
			    cfg->num_rxdma_refill_ring_entries;
		return;
	case REO_STATUS:
		*mem_size = (sizeof(struct tlv_32_hdr) +
			     sizeof(struct reo_get_queue_stats_status)) *
			    cfg->num_reo_status_ring_entries;
		return;
	case RXDMA_MONITOR_STATUS:
		*mem_size = (sizeof(struct wbm_buffer_ring)) *
			    cfg->num_mon_status_ring_entries;
		return;
	case TX_MONITOR_BUF:
	case TX_MONITOR_DST:
		*mem_size = dp_get_tx_mon_mem_size(cfg, ring_type);
		return;
	default:
		return;
	}
}

/**
 * dp_update_num_elements_by_desc_type() - Update num of descriptors based
 * on type and the corresponding ini configuration
 * @cfg: prealloc related cfg params
 * @desc_type: descriptor type
 * @num_elements: num of descriptor elements
 *
 * Return: None
 */
static void
dp_update_num_elements_by_desc_type(struct wlan_dp_prealloc_cfg *cfg,
				    enum qdf_dp_desc_type desc_type,
				    uint16_t *num_elements)
{
	switch (desc_type) {
	case QDF_DP_TX_DESC_TYPE:
		*num_elements = cfg->num_tx_desc;
		return;
	case QDF_DP_TX_EXT_DESC_TYPE:
	case QDF_DP_TX_EXT_DESC_LINK_TYPE:
	case QDF_DP_TX_TSO_DESC_TYPE:
	case QDF_DP_TX_TSO_NUM_SEG_TYPE:
		*num_elements = cfg->num_tx_ext_desc;
		return;
	case QDF_DP_RX_DESC_BUF_TYPE:
		*num_elements = cfg->num_rx_sw_desc *
				WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE;
		return;
	default:
		return;
	}
}

#ifdef WLAN_DP_PROFILE_SUPPORT
static void
wlan_dp_sync_prealloc_with_profile_cfg(struct wlan_dp_prealloc_cfg *cfg)
{
	struct wlan_dp_memory_profile_info *profile_info;
	struct wlan_dp_memory_profile_ctx *profile_ctx;
	int i;

	profile_info = wlan_dp_get_profile_info();
	if (!profile_info->is_selected)
		return;

	for (i = 0; i < profile_info->size; i++) {
		profile_ctx = &profile_info->ctx[i];

		switch (profile_ctx->param_type) {
		case DP_TX_DESC_NUM_CFG:
			cfg->num_tx_desc = profile_ctx->size;
			break;
		case DP_TX_EXT_DESC_NUM_CFG:
			cfg->num_tx_ext_desc = profile_ctx->size;
			break;
		case DP_TX_RING_SIZE_CFG:
			cfg->num_tx_ring_entries = profile_ctx->size;
			break;
		case DP_TX_COMPL_RING_SIZE_CFG:
			cfg->num_tx_comp_ring_entries = profile_ctx->size;
			break;
		case DP_RX_SW_DESC_NUM_CFG:
			cfg->num_rx_sw_desc = profile_ctx->size;
			break;
		case DP_REO_DST_RING_SIZE_CFG:
			cfg->num_reo_dst_ring_entries = profile_ctx->size;
			break;
		case DP_RXDMA_BUF_RING_SIZE_CFG:
			cfg->num_rxdma_buf_ring_entries = profile_ctx->size;
			break;
		case DP_RXDMA_REFILL_RING_SIZE_CFG:
			cfg->num_rxdma_refill_ring_entries = profile_ctx->size;
			break;
		default:
			break;
		}
	}
}
#else
static inline void
wlan_dp_sync_prealloc_with_profile_cfg(struct wlan_dp_prealloc_cfg *cfg) {}
#endif

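/**
 * dp_prealloc_init() - pre-allocate DP memory
 * @ctrl_psoc: objmgr psoc handle used to fetch the prealloc configuration
 *
 * Sizes each pre-alloc table entry from the INI/profile driven configuration,
 * then reserves context, SRNG, multi-page and unaligned CE memory up front.
 * On failure, everything already reserved is released again via
 * dp_prealloc_deinit().
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */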
QDF_STATUS dp_prealloc_init(struct cdp_ctrl_objmgr_psoc *ctrl_psoc)
{
	int i;
	struct dp_prealloc_context *cp;
	struct dp_consistent_prealloc *p;
	struct dp_multi_page_prealloc *mp;
	struct dp_consistent_prealloc_unaligned *up;
	qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	struct wlan_dp_prealloc_cfg cfg;

	if (!qdf_ctx || !ctrl_psoc) {
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	wlan_cfg_get_prealloc_cfg(ctrl_psoc, &cfg);
	wlan_dp_sync_prealloc_with_profile_cfg(&cfg);

	/* Context pre-alloc */
	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_context_allocs); i++) {
		cp = &g_dp_context_allocs[i];
		dp_update_mem_size_by_ctx_type(&cfg, cp->ctxt_type,
					       &cp->size);
		cp->addr = qdf_mem_malloc(cp->size);

		if (qdf_unlikely(!cp->addr) && cp->is_critical) {
			dp_warn("i %d: unable to preallocate %d bytes memory!",
				i, cp->size);
			break;
		}
	}

	if (i != QDF_ARRAY_SIZE(g_dp_context_allocs)) {
		dp_err("unable to allocate context memory!");
		goto deinit;
	}

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_allocs); i++) {
		p = &g_dp_consistent_allocs[i];
		p->in_use = 0;
		dp_update_mem_size_by_ring_type(&cfg, p->ring_type, &p->size);
		p->va_aligned =
			qdf_aligned_mem_alloc_consistent(qdf_ctx,
							 &p->size,
							 &p->va_unaligned,
							 &p->pa_unaligned,
							 &p->pa_aligned,
							 DP_RING_BASE_ALIGN);
		if (qdf_unlikely(!p->va_unaligned)) {
			dp_warn("i %d: unable to preallocate %d bytes memory!",
				i, p->size);
			break;
		}
		dp_debug("i %d: va aligned %pK pa aligned %pK size %d",
			 i, p->va_aligned, (void *)p->pa_aligned, p->size);
	}

	if (i != QDF_ARRAY_SIZE(g_dp_consistent_allocs)) {
		dp_err("unable to allocate consistent memory!");
		goto deinit;
	}

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
		mp = &g_dp_multi_page_allocs[i];
		mp->in_use = false;
		dp_update_num_elements_by_desc_type(&cfg, mp->desc_type,
						    &mp->element_num);
		if (mp->cacheable)
			mp->pages.page_size = DP_BLOCKMEM_SIZE;

		qdf_mem_multi_pages_alloc(qdf_ctx, &mp->pages,
					  mp->element_size,
					  mp->element_num,
					  0, mp->cacheable);
		if (qdf_unlikely(!mp->pages.num_pages)) {
			dp_warn("i %d: preallocate %d bytes multi-pages failed!",
				i, (int)(mp->element_size * mp->element_num));
			break;
		}

		mp->pages.is_mem_prealloc = true;
		dp_info("i %d: cacheable_pages %pK dma_pages %pK num_pages %d",
			i, mp->pages.cacheable_pages,
			mp->pages.dma_pages,
			mp->pages.num_pages);
	}

	if (i != QDF_ARRAY_SIZE(g_dp_multi_page_allocs)) {
		dp_err("unable to allocate multi-pages memory!");
		goto deinit;
	}

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_unaligned_allocs); i++) {
		up = &g_dp_consistent_unaligned_allocs[i];
		up->in_use = false;
		up->va_unaligned = qdf_mem_alloc_consistent(qdf_ctx,
							    qdf_ctx->dev,
							    up->size,
							    &up->pa_unaligned);
		if (qdf_unlikely(!up->va_unaligned)) {
			dp_warn("i %d: fail to prealloc unaligned %d bytes!",
				i, up->size);
			break;
		}
		dp_info("i %d: va unalign %pK pa unalign %pK size %d",
			i, up->va_unaligned,
			(void *)up->pa_unaligned, up->size);
	}

	if (i != QDF_ARRAY_SIZE(g_dp_consistent_unaligned_allocs)) {
		dp_info("unable to allocate unaligned memory!");
		/*
		 * If only the unaligned memory prealloc fails, is a full
		 * deinit of all other DP srng/multi-page memory really
		 * necessary?
		 */
		goto deinit;
	}

	return QDF_STATUS_SUCCESS;
deinit:
	dp_prealloc_deinit();
	return QDF_STATUS_E_FAILURE;
}

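/**
 * dp_prealloc_get_context_memory() - get DP context memory from pre-alloc pool
 * @ctxt_type: DP context type
 * @ctxt_size: required size
 *
 * Return: address of a free pre-allocated element of matching type and
 * sufficient size, or NULL if none is available
 */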
void *dp_prealloc_get_context_memory(uint32_t ctxt_type, qdf_size_t ctxt_size)
{
	int i;
	struct dp_prealloc_context *cp;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_context_allocs); i++) {
		cp = &g_dp_context_allocs[i];

		if ((ctxt_type == cp->ctxt_type) && !cp->in_use &&
		    cp->addr && ctxt_size <= cp->size) {
			cp->in_use = true;
			return cp->addr;
		}
	}

	return NULL;
}

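/**
 * dp_prealloc_put_context_memory() - return DP context memory to pre-alloc
 * pool
 * @ctxt_type: DP context type
 * @vaddr: address of the memory being returned
 *
 * Return: QDF_STATUS_SUCCESS if @vaddr belongs to the pool,
 * QDF_STATUS_E_FAILURE otherwise
 */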
QDF_STATUS dp_prealloc_put_context_memory(uint32_t ctxt_type, void *vaddr)
{
	int i;
	struct dp_prealloc_context *cp;

	if (!vaddr)
		return QDF_STATUS_E_FAILURE;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_context_allocs); i++) {
		cp = &g_dp_context_allocs[i];

		if ((ctxt_type == cp->ctxt_type) && vaddr == cp->addr) {
			qdf_mem_zero(cp->addr, cp->size);
			cp->in_use = false;
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}

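/**
 * dp_prealloc_get_coherent() - get consistent ring memory from pre-alloc pool
 * @size: in/out - requested size; updated to the size actually handed out
 * @base_vaddr_unaligned: out - unaligned base virtual address
 * @paddr_unaligned: out - unaligned physical address
 * @paddr_aligned: out - aligned physical address
 * @align: requested alignment (unused; pool entries are already aligned to
 *	DP_RING_BASE_ALIGN at init time)
 * @ring_type: HAL ring type to match
 *
 * Return: aligned virtual address, or NULL if no free element of the given
 * ring type and sufficient size exists
 */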
void *dp_prealloc_get_coherent(uint32_t *size, void **base_vaddr_unaligned,
			       qdf_dma_addr_t *paddr_unaligned,
			       qdf_dma_addr_t *paddr_aligned,
			       uint32_t align,
			       uint32_t ring_type)
{
	int i;
	struct dp_consistent_prealloc *p;
	void *va_aligned = NULL;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_allocs); i++) {
		p = &g_dp_consistent_allocs[i];
		if (p->ring_type == ring_type && !p->in_use &&
		    p->va_unaligned && *size <= p->size) {
			p->in_use = 1;
			*base_vaddr_unaligned = p->va_unaligned;
			*paddr_unaligned = p->pa_unaligned;
			*paddr_aligned = p->pa_aligned;
			va_aligned = p->va_aligned;
			*size = p->size;
			dp_debug("index %i -> ring type %s va-aligned %pK", i,
				 dp_srng_get_str_from_hal_ring_type(ring_type),
				 va_aligned);
			break;
		}
	}

	if (i == QDF_ARRAY_SIZE(g_dp_consistent_allocs))
		dp_info("unable to allocate memory for ring type %s (%d) size %d",
			dp_srng_get_str_from_hal_ring_type(ring_type),
			ring_type, *size);
	return va_aligned;
}

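/**
 * dp_prealloc_put_coherent() - return consistent ring memory to pre-alloc pool
 * @size: size being returned (unused; the pool entry size is used instead)
 * @vaddr_unaligned: unaligned virtual address the element was handed out with
 * @paddr: physical address (unused; lookup is by virtual address)
 */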
void dp_prealloc_put_coherent(qdf_size_t size, void *vaddr_unaligned,
			      qdf_dma_addr_t paddr)
{
	int i;
	struct dp_consistent_prealloc *p;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_allocs); i++) {
		p = &g_dp_consistent_allocs[i];
		if (p->va_unaligned == vaddr_unaligned) {
			dp_debug("index %d, returned", i);
			p->in_use = 0;
			qdf_mem_zero(p->va_unaligned, p->size);
			break;
		}
	}

	if (i == QDF_ARRAY_SIZE(g_dp_consistent_allocs))
		dp_err("unable to find vaddr %pK", vaddr_unaligned);
}

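/**
 * dp_prealloc_get_multi_pages() - get multi-page memory from pre-alloc pool
 * @desc_type: descriptor pool type
 * @element_size: single element size
 * @element_num: number of elements needed
 * @pages: out - multi page info copied from the matching pool entry
 * @cacheable: whether cacheable memory was requested (unused; matching is by
 *	type, element size and count)
 *
 * Leaves @pages untouched when no matching free pool entry exists, so the
 * caller can fall back to a regular allocation.
 */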
void dp_prealloc_get_multi_pages(uint32_t desc_type,
				 qdf_size_t element_size,
				 uint16_t element_num,
				 struct qdf_mem_multi_page_t *pages,
				 bool cacheable)
{
	int i;
	struct dp_multi_page_prealloc *mp;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
		mp = &g_dp_multi_page_allocs[i];

		if (desc_type == mp->desc_type && !mp->in_use &&
		    mp->pages.num_pages && element_size == mp->element_size &&
		    element_num <= mp->element_num) {
			mp->in_use = true;
			*pages = mp->pages;

			dp_info("i %d: desc_type %d cacheable_pages %pK dma_pages %pK num_pages %d",
				i, desc_type,
				mp->pages.cacheable_pages,
				mp->pages.dma_pages,
				mp->pages.num_pages);
			break;
		}
	}
}

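/**
 * dp_prealloc_put_multi_pages() - return multi-page memory to pre-alloc pool
 * @desc_type: descriptor pool type
 * @pages: multi page info originally handed out by
 *	dp_prealloc_get_multi_pages()
 */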
void dp_prealloc_put_multi_pages(uint32_t desc_type,
				 struct qdf_mem_multi_page_t *pages)
{
	int i;
	struct dp_multi_page_prealloc *mp;
	bool mp_found = false;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
		mp = &g_dp_multi_page_allocs[i];

		if (desc_type == mp->desc_type) {
			/* compare the address field that applies for this
			 * pool's cacheable flag
			 */
			mp_found = mp->cacheable ?
				(mp->pages.cacheable_pages ==
				 pages->cacheable_pages) :
				(mp->pages.dma_pages == pages->dma_pages);
			/* found it; put it back to the prealloc pool */
			if (mp_found) {
				dp_info("i %d: desc_type %d returned",
					i, desc_type);
				mp->in_use = false;
				qdf_mem_multi_pages_zero(&mp->pages,
							 mp->cacheable);
				break;
			}
		}
	}

	if (qdf_unlikely(!mp_found))
		dp_warn("Not prealloc pages %pK desc_type %d cacheable_pages %pK dma_pages %pK",
			pages,
			desc_type,
			pages->cacheable_pages,
			pages->dma_pages);
}

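/**
 * dp_prealloc_get_consistent_mem_unaligned() - get unaligned consistent memory
 * @size: required size; must exactly match the pool entry size
 * @base_addr: out - physical address of the element
 * @ring_type: CE ring type to match
 *
 * Return: unaligned virtual address, or NULL if no matching free element
 * exists
 */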
void *dp_prealloc_get_consistent_mem_unaligned(qdf_size_t size,
					       qdf_dma_addr_t *base_addr,
					       uint32_t ring_type)
{
	int i;
	struct dp_consistent_prealloc_unaligned *up;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_unaligned_allocs); i++) {
		up = &g_dp_consistent_unaligned_allocs[i];

		if (ring_type == up->ring_type && size == up->size &&
		    up->va_unaligned && !up->in_use) {
			up->in_use = true;
			*base_addr = up->pa_unaligned;
			dp_info("i %d: va unalign %pK pa unalign %pK size %d",
				i, up->va_unaligned,
				(void *)up->pa_unaligned, up->size);
			return up->va_unaligned;
		}
	}

	return NULL;
}

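/**
 * dp_prealloc_put_consistent_mem_unaligned() - return unaligned consistent
 * memory to the pre-alloc pool
 * @va_unaligned: unaligned virtual address handed out earlier
 */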
void dp_prealloc_put_consistent_mem_unaligned(void *va_unaligned)
{
	int i;
	struct dp_consistent_prealloc_unaligned *up;

	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_unaligned_allocs); i++) {
		up = &g_dp_consistent_unaligned_allocs[i];

		if (va_unaligned == up->va_unaligned) {
			dp_info("index %d, returned", i);
			up->in_use = false;
			qdf_mem_zero(up->va_unaligned, up->size);
			break;
		}
	}

	if (i == QDF_ARRAY_SIZE(g_dp_consistent_unaligned_allocs))
		dp_err("unable to find vaddr %pK", va_unaligned);
}
#endif /* DP_MEM_PRE_ALLOC */

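/**
 * dp_get_tx_inqueue() - get number of Tx frames pending in DP
 * @soc: CDP soc handle
 *
 * Return: Tx frame count tracked for runtime PM; 0 when FEATURE_RUNTIME_PM
 * is not enabled
 */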
#ifdef FEATURE_RUNTIME_PM
uint32_t dp_get_tx_inqueue(ol_txrx_soc_handle soc)
{
	struct dp_soc *dp_soc;

	dp_soc = cdp_soc_t_to_dp_soc(soc);

	return qdf_atomic_read(&dp_soc->tx_pending_rtpm);
}
#else
uint32_t dp_get_tx_inqueue(ol_txrx_soc_handle soc)
{
	return 0;
}
#endif
