1 /*
2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #ifndef DP_TX_DESC_H
21 #define DP_TX_DESC_H
22
23 #include "dp_types.h"
24 #include "dp_tx.h"
25 #include "dp_internal.h"
26
27 /*
28 * 21 bits cookie
29 * 1 bit special pool indicator
30 * 3 bits unused
31 * 2 bits pool id 0 ~ 3,
32 * 10 bits page id 0 ~ 1023
33 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
34 */
35 /* ???Ring ID needed??? */
36
37 /* TODO: Need to revisit this change for Rhine */
38 #ifdef WLAN_SOFTUMAC_SUPPORT
39 #define DP_TX_DESC_ID_SPCL_MASK 0x100000
40 #define DP_TX_DESC_ID_SPCL_OS 20
41 #define DP_TX_DESC_ID_POOL_MASK 0x018000
42 #define DP_TX_DESC_ID_POOL_OS 15
43 #define DP_TX_DESC_ID_PAGE_MASK 0x007FF0
44 #define DP_TX_DESC_ID_PAGE_OS 4
45 #define DP_TX_DESC_ID_OFFSET_MASK 0x00000F
46 #define DP_TX_DESC_ID_OFFSET_OS 0
47 #else
48 #define DP_TX_DESC_ID_SPCL_MASK 0x100000
49 #define DP_TX_DESC_ID_SPCL_OS 20
50 #define DP_TX_DESC_ID_POOL_MASK 0x018000
51 #define DP_TX_DESC_ID_POOL_OS 15
52 #define DP_TX_DESC_ID_PAGE_MASK 0x007FE0
53 #define DP_TX_DESC_ID_PAGE_OS 5
54 #define DP_TX_DESC_ID_OFFSET_MASK 0x00001F
55 #define DP_TX_DESC_ID_OFFSET_OS 0
56 #endif /* WLAN_SOFTUMAC_SUPPORT */
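/*
 * Illustrative sketch (not part of the driver API): how the cookie fields
 * described above can be unpacked from a descriptor ID using the masks and
 * shifts defined in this block. The helper name is hypothetical.
 *
 *	static inline void dp_tx_desc_id_unpack_example(uint32_t desc_id)
 *	{
 *		uint8_t spcl = (desc_id & DP_TX_DESC_ID_SPCL_MASK) >>
 *					DP_TX_DESC_ID_SPCL_OS;
 *		uint8_t pool_id = (desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *					DP_TX_DESC_ID_POOL_OS;
 *		uint16_t page_id = (desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *					DP_TX_DESC_ID_PAGE_OS;
 *		uint16_t offset = (desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *					DP_TX_DESC_ID_OFFSET_OS;
 *
 *		(void)spcl; (void)pool_id; (void)page_id; (void)offset;
 *	}
 */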
57
58 /*
59 * Compilation assert on tx desc size
60 *
61  * If this assert is hit, update POOL_MASK and
62  * PAGE_MASK according to the updated size.
63  *
64  * For the current PAGE mask, the allowed tx_desc size
65  * range is between 128 and 256.
66 */
67 QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
68 ((sizeof(struct dp_tx_desc_s)) <=
69 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
70 ((sizeof(struct dp_tx_desc_s)) >
71 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
72 );
73
74 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
75 #define TX_DESC_LOCK_CREATE(lock)
76 #define TX_DESC_LOCK_DESTROY(lock)
77 #define TX_DESC_LOCK_LOCK(lock)
78 #define TX_DESC_LOCK_UNLOCK(lock)
79 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
80 ((pool)->status == FLOW_POOL_INACTIVE)
81 #ifdef QCA_AC_BASED_FLOW_CONTROL
82 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
83 dp_tx_flow_pool_member_clean(_tx_desc_pool)
84
85 #else /* !QCA_AC_BASED_FLOW_CONTROL */
86 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
87 do { \
88 (_tx_desc_pool)->elem_size = 0; \
89 (_tx_desc_pool)->freelist = NULL; \
90 (_tx_desc_pool)->pool_size = 0; \
91 (_tx_desc_pool)->avail_desc = 0; \
92 (_tx_desc_pool)->start_th = 0; \
93 (_tx_desc_pool)->stop_th = 0; \
94 (_tx_desc_pool)->status = FLOW_POOL_INACTIVE; \
95 } while (0)
96 #endif /* QCA_AC_BASED_FLOW_CONTROL */
97 #else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
98 #define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock)
99 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
100 #define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock_bh(lock)
101 #define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock_bh(lock)
102 #define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
103 #define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool) \
104 do { \
105 (_tx_desc_pool)->elem_size = 0; \
106 (_tx_desc_pool)->num_allocated = 0; \
107 (_tx_desc_pool)->freelist = NULL; \
108 (_tx_desc_pool)->elem_count = 0; \
109 (_tx_desc_pool)->num_free = 0; \
110 } while (0)
111 #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
112 #define MAX_POOL_BUFF_COUNT 10000
113
114 #ifdef DP_TX_TRACKING
115 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
116 uint32_t magic_pattern)
117 {
118 tx_desc->magic = magic_pattern;
119 }
120 #else
121 static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
122 uint32_t magic_pattern)
123 {
124 }
125 #endif
126
127 /**
128 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
129 * @soc: Handle to DP SoC structure
130 * @pool_id: pool to allocate
131 * @num_elem: Number of descriptor elements per pool
132 * @spcl_tx_desc: if special desc
133 *
134 * This function allocates memory for SW tx descriptors
135 * (used within host for tx data path).
136  * The number of tx descriptors required will be large since, based on
137  * the number of clients (1024 clients x 3 radios), the number of
138  * outstanding MSDUs stored in TQM queues and LMAC queues will be
139  * significant.
140  *
141  * To avoid allocating a large contiguous block of memory, the qdf
142  * multi_page_alloc function is used to allocate memory in multiple
143  * pages. The allocated memory is then iterated across pages and each
144  * descriptor is linked to the next descriptor, taking care of page
145  * boundaries.
146  *
147  * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
148  * allocated, one for each ring; this minimizes lock contention when
149  * hard_start_xmit is called from multiple CPUs.
150  *
151  * Alternatively, multiple pools can be used for multiple VDEVs for
152  * VDEV-level flow control.
153 *
154 * Return: Status code. 0 for success.
155 */
156 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
157 uint32_t num_elem, bool spcl_tx_desc);
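/*
 * Minimal sketch (illustrative, not the exact implementation) of the
 * page-aware freelist linking described above: walk every element of every
 * cacheable page and chain the elements through their ->next pointer so
 * that allocation later becomes a simple freelist pop. Field names follow
 * this header; the loop itself is an assumption.
 *
 *	struct dp_tx_desc_s *desc, *prev = NULL;
 *	uint32_t page, elem;
 *
 *	for (page = 0; page < pool->desc_pages.num_pages; page++) {
 *		for (elem = 0;
 *		     elem < pool->desc_pages.num_element_per_page; elem++) {
 *			desc = (struct dp_tx_desc_s *)
 *			       ((uint8_t *)pool->desc_pages.cacheable_pages[page] +
 *				elem * pool->elem_size);
 *			if (prev)
 *				prev->next = desc;
 *			else
 *				pool->freelist = desc;
 *			prev = desc;
 *		}
 *	}
 *	if (prev)
 *		prev->next = NULL;
 */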
158
159 /**
160 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
161 * @soc: Handle to DP SoC structure
162 * @pool_id: pool to allocate
163 * @num_elem: Number of descriptor elements per pool
164 * @spcl_tx_desc: if special desc
165 *
166 * Return: QDF_STATUS_SUCCESS
167 * QDF_STATUS_E_FAULT
168 */
169 QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
170 uint32_t num_elem, bool spcl_tx_desc);
171
172 /**
173  * dp_tx_desc_pool_free() - Free the tx descriptor pool(s)
174 * @soc: Handle to DP SoC structure
175 * @pool_id: pool to free
176 * @spcl_tx_desc: if special desc
177 *
178 */
179 void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
180 bool spcl_tx_desc);
181
182 /**
183 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
184 * @soc: Handle to DP SoC structure
185 * @pool_id: pool to de-initialize
186 * @spcl_tx_desc: if special desc
187 *
188 */
189 void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
190 bool spcl_tx_desc);
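/*
 * Typical call order for the pool APIs above, sketched under the assumption
 * of one regular (non-special) pool per ring:
 *
 *	if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem, false) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_tx_desc_pool_init(soc, pool_id, num_elem, false) !=
 *	    QDF_STATUS_SUCCESS) {
 *		dp_tx_desc_pool_free(soc, pool_id, false);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *
 *	... use the pool via dp_tx_desc_alloc()/dp_tx_desc_free() ...
 *
 *	dp_tx_desc_pool_deinit(soc, pool_id, false);
 *	dp_tx_desc_pool_free(soc, pool_id, false);
 */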
191
192 /**
193 * dp_tx_ext_desc_pool_alloc_by_id() - allocate TX extension Descriptor pool
194 * based on pool ID
195 * @soc: Handle to DP SoC structure
196 * @num_elem: Number of descriptor elements per pool
197 * @pool_id: Pool ID
198 *
199  * Return: QDF_STATUS_SUCCESS
200 * QDF_STATUS_E_NOMEM
201 */
202 QDF_STATUS dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc,
203 uint32_t num_elem,
204 uint8_t pool_id);
205 /**
206 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
207 * @soc: Handle to DP SoC structure
208 * @num_pool: Number of pools to allocate
209 * @num_elem: Number of descriptor elements per pool
210 *
211 * Return: QDF_STATUS_SUCCESS
212 * QDF_STATUS_E_NOMEM
213 */
214 QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
215 uint32_t num_elem);
216
217 /**
218 * dp_tx_ext_desc_pool_init_by_id() - initialize Tx extension Descriptor pool
219 * based on pool ID
220 * @soc: Handle to DP SoC structure
221 * @num_elem: Number of descriptor elements per pool
222 * @pool_id: Pool ID
223 *
224  * Return: QDF_STATUS_SUCCESS
225 * QDF_STATUS_E_FAULT
226 */
227 QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
228 uint8_t pool_id);
229
230 /**
231 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
232 * @soc: Handle to DP SoC structure
233 * @num_pool: Number of pools to initialize
234 * @num_elem: Number of descriptor elements per pool
235 *
236 * Return: QDF_STATUS_SUCCESS
237 * QDF_STATUS_E_NOMEM
238 */
239 QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
240 uint32_t num_elem);
241
242 /**
243 * dp_tx_ext_desc_pool_free_by_id() - free TX extension Descriptor pool
244 * based on pool ID
245 * @soc: Handle to DP SoC structure
246 * @pool_id: Pool ID
247 *
248 */
249 void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);
250
251 /**
252 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
253 * @soc: Handle to DP SoC structure
254 * @num_pool: Number of pools to free
255 *
256 */
257 void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
258
259 /**
260 * dp_tx_ext_desc_pool_deinit_by_id() - deinit Tx extension Descriptor pool
261 * based on pool ID
262 * @soc: Handle to DP SoC structure
263 * @pool_id: Pool ID
264 *
265 */
266 void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);
267
268 /**
269 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
270 * @soc: Handle to DP SoC structure
271 * @num_pool: Number of pools to de-initialize
272 *
273 */
274 void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
275
276 /**
277 * dp_tx_tso_desc_pool_alloc_by_id() - allocate TSO Descriptor pool based
278 * on pool ID
279 * @soc: Handle to DP SoC structure
280 * @num_elem: Number of descriptor elements per pool
281 * @pool_id: Pool ID
282 *
283  * Return: QDF_STATUS_SUCCESS
284 * QDF_STATUS_E_NOMEM
285 */
286 QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
287 uint8_t pool_id);
288
289 /**
290 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
291 * @soc: Handle to DP SoC structure
292 * @num_pool: Number of pools to allocate
293 * @num_elem: Number of descriptor elements per pool
294 *
295 * Return: QDF_STATUS_SUCCESS
296 * QDF_STATUS_E_NOMEM
297 */
298 QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
299 uint32_t num_elem);
300
301 /**
302 * dp_tx_tso_desc_pool_init_by_id() - initialize TSO Descriptor pool
303 * based on pool ID
304 * @soc: Handle to DP SoC structure
305 * @num_elem: Number of descriptor elements per pool
306 * @pool_id: Pool ID
307 *
308  * Return: QDF_STATUS_SUCCESS
309 * QDF_STATUS_E_NOMEM
310 */
311 QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
312 uint8_t pool_id);
313
314 /**
315 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
316 * @soc: Handle to DP SoC structure
317 * @num_pool: Number of pools to initialize
318 * @num_elem: Number of descriptor elements per pool
319 *
320 * Return: QDF_STATUS_SUCCESS
321 * QDF_STATUS_E_NOMEM
322 */
323 QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
324 uint32_t num_elem);
325
326 /**
327 * dp_tx_tso_desc_pool_free_by_id() - free TSO Descriptor pool based on pool ID
328 * @soc: Handle to DP SoC structure
329 * @pool_id: Pool ID
330 */
331 void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);
332
333 /**
334 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
335 * @soc: Handle to DP SoC structure
336 * @num_pool: Number of pools to free
337 *
338 */
339 void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
340
341 /**
342 * dp_tx_tso_desc_pool_deinit_by_id() - deinitialize TSO Descriptor pool
343 * based on pool ID
344 * @soc: Handle to DP SoC structure
345 * @pool_id: Pool ID
346 */
347 void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);
348
349 /**
350 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
351 * @soc: Handle to DP SoC structure
352  * @num_pool: Number of pools to de-initialize
353 *
354 */
355 void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
356
357 /**
358  * dp_tx_tso_num_seg_pool_alloc_by_id() - Allocate descriptors that track the
359 * fragments in each tso segment based on pool ID
360 * @soc: handle to dp soc structure
361 * @num_elem: total number of descriptors to be allocated
362 * @pool_id: Pool ID
363 *
364  * Return: QDF_STATUS_SUCCESS
365 * QDF_STATUS_E_NOMEM
366 */
367 QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
368 uint32_t num_elem,
369 uint8_t pool_id);
370
371 /**
372  * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
373 * fragments in each tso segment
374 *
375 * @soc: handle to dp soc structure
376 * @num_pool: number of pools to allocate
377 * @num_elem: total number of descriptors to be allocated
378 *
379 * Return: QDF_STATUS_SUCCESS
380 * QDF_STATUS_E_NOMEM
381 */
382 QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
383 uint32_t num_elem);
384
385 /**
386  * dp_tx_tso_num_seg_pool_init_by_id() - Initialize descriptors that track the
387 * fragments in each tso segment based on pool ID
388 *
389 * @soc: handle to dp soc structure
390 * @num_elem: total number of descriptors to be initialized
391 * @pool_id: Pool ID
392 *
393  * Return: QDF_STATUS_SUCCESS
394 * QDF_STATUS_E_FAULT
395 */
396 QDF_STATUS dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc,
397 uint32_t num_elem,
398 uint8_t pool_id);
399
400 /**
401  * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
402 * fragments in each tso segment
403 *
404 * @soc: handle to dp soc structure
405 * @num_pool: number of pools to initialize
406 * @num_elem: total number of descriptors to be initialized
407 *
408 * Return: QDF_STATUS_SUCCESS
409 * QDF_STATUS_E_FAULT
410 */
411 QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
412 uint32_t num_elem);
413
414 /**
415  * dp_tx_tso_num_seg_pool_free_by_id() - free descriptors that track the
416 * fragments in each tso segment based on pool ID
417 *
418 * @soc: handle to dp soc structure
419 * @pool_id: Pool ID
420 */
421 void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);
422
423 /**
424  * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
425 * fragments in each tso segment
426 *
427 * @soc: handle to dp soc structure
428 * @num_pool: number of pools to free
429 */
430 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);
431
432 /**
433  * dp_tx_tso_num_seg_pool_deinit_by_id() - de-initialize descriptors that track
434 * the fragments in each tso segment based on pool ID
435 * @soc: handle to dp soc structure
436 * @pool_id: Pool ID
437 */
438 void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);
439
440 /**
441  * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
442 * fragments in each tso segment
443 *
444 * @soc: handle to dp soc structure
445 * @num_pool: number of pools to de-initialize
446 *
447  * Return: None
449 */
450 void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
451
452 #ifdef DP_UMAC_HW_RESET_SUPPORT
453 /**
454  * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
455 * @soc: Handle to DP SoC structure
456 * @nbuf_list: nbuf list for delayed free
457 * @cleanup: cleanup the pool
458 *
459 */
460 void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list,
461 bool cleanup);
462 #endif
463
464 /**
465 * dp_tx_desc_clear() - Clear contents of tx desc
466 * @tx_desc: descriptor to free
467 *
468 * Return: none
469 */
470 static inline void
471 dp_tx_desc_clear(struct dp_tx_desc_s *tx_desc)
472 {
473 tx_desc->vdev_id = DP_INVALID_VDEV_ID;
474 tx_desc->nbuf = NULL;
475 tx_desc->flags = 0;
476 tx_desc->next = NULL;
477 }
478
479 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
480 void dp_tx_flow_control_init(struct dp_soc *);
481 void dp_tx_flow_control_deinit(struct dp_soc *);
482
483 QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
484 tx_pause_callback pause_cb);
485 QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
486 uint8_t vdev_id);
487 void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
488 uint8_t vdev_id);
489 void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
490 struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
491 uint8_t flow_pool_id, uint32_t flow_pool_size);
492
493 QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
494 uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
495 void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
496 uint8_t flow_type, uint8_t flow_pool_id);
497
498 /**
499 * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
500 * @pool: flow pool
501 *
502 * Caller needs to take lock and do sanity checks.
503 *
504 * Return: tx descriptor
505 */
506 static inline
507 struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
508 {
509 struct dp_tx_desc_s *tx_desc = pool->freelist;
510
511 pool->freelist = pool->freelist->next;
512 pool->avail_desc--;
513 return tx_desc;
514 }
515
516 /**
517 * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
518 * @pool: flow pool
519 * @tx_desc: tx descriptor
520 *
521 * Caller needs to take lock and do sanity checks.
522 *
523 * Return: none
524 */
525 static inline
526 void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
527 struct dp_tx_desc_s *tx_desc)
528 {
529 tx_desc->next = pool->freelist;
530 pool->freelist = tx_desc;
531 pool->avail_desc++;
532 }
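/*
 * Both helpers above expect the caller to hold the pool lock and perform
 * the sanity checks; a hedged usage sketch (locals are hypothetical):
 *
 *	struct dp_tx_desc_s *desc = NULL;
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	if (pool->avail_desc)
 *		desc = dp_tx_get_desc_flow_pool(pool);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 *
 *	... use desc ...
 *
 *	qdf_spin_lock_bh(&pool->flow_pool_lock);
 *	dp_tx_put_desc_flow_pool(pool, desc);
 *	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 */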
533
534 static inline void
535 dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
536 struct dp_tx_desc_s *head_desc,
537 struct dp_tx_desc_s *tail_desc,
538 uint32_t fast_desc_count)
539 {
540 }
541
542 #ifdef QCA_AC_BASED_FLOW_CONTROL
543
544 /**
545 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
546 * @pool: flow pool
547 *
548 * Return: None
549 */
550 static inline void
551 dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
552 {
553 pool->elem_size = 0;
554 pool->freelist = NULL;
555 pool->pool_size = 0;
556 pool->avail_desc = 0;
557 qdf_mem_zero(pool->start_th, FL_TH_MAX);
558 qdf_mem_zero(pool->stop_th, FL_TH_MAX);
559 pool->status = FLOW_POOL_INACTIVE;
560 }
561
562 /**
563 * dp_tx_is_threshold_reached() - Check if current avail desc meet threshold
564 * @pool: flow pool
565 * @avail_desc: available descriptor number
566 *
567 * Return: true if threshold is met, false if not
568 */
569 static inline bool
570 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
571 {
572 if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
573 return true;
574 else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
575 return true;
576 else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
577 return true;
578 else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
579 return true;
580 else
581 return false;
582 }
583
584 /**
585 * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
586 * @soc: dp soc
587 * @pool: flow pool
588 */
589 static inline void
590 dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
591 struct dp_tx_desc_pool_s *pool)
592 {
593 if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
594 pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
595 return;
596 } else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
597 pool->avail_desc > pool->stop_th[DP_TH_VI]) {
598 pool->status = FLOW_POOL_BE_BK_PAUSED;
599 } else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
600 pool->avail_desc > pool->stop_th[DP_TH_VO]) {
601 pool->status = FLOW_POOL_VI_PAUSED;
602 } else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
603 pool->avail_desc > pool->stop_th[DP_TH_HI]) {
604 pool->status = FLOW_POOL_VO_PAUSED;
605 } else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
606 pool->status = FLOW_POOL_ACTIVE_PAUSED;
607 }
608
609 switch (pool->status) {
610 case FLOW_POOL_ACTIVE_PAUSED:
611 soc->pause_cb(pool->flow_pool_id,
612 WLAN_NETIF_PRIORITY_QUEUE_OFF,
613 WLAN_DATA_FLOW_CTRL_PRI);
614 fallthrough;
615
616 case FLOW_POOL_VO_PAUSED:
617 soc->pause_cb(pool->flow_pool_id,
618 WLAN_NETIF_VO_QUEUE_OFF,
619 WLAN_DATA_FLOW_CTRL_VO);
620 fallthrough;
621
622 case FLOW_POOL_VI_PAUSED:
623 soc->pause_cb(pool->flow_pool_id,
624 WLAN_NETIF_VI_QUEUE_OFF,
625 WLAN_DATA_FLOW_CTRL_VI);
626 fallthrough;
627
628 case FLOW_POOL_BE_BK_PAUSED:
629 soc->pause_cb(pool->flow_pool_id,
630 WLAN_NETIF_BE_BK_QUEUE_OFF,
631 WLAN_DATA_FLOW_CTRL_BE_BK);
632 break;
633 default:
634 dp_err("Invalid pool status:%u to adjust", pool->status);
635 }
636 }
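/*
 * The cascade above implies the stop thresholds are ordered
 * stop_th[DP_TH_BE_BK] > stop_th[DP_TH_VI] > stop_th[DP_TH_VO] >
 * stop_th[DP_TH_HI]. As a worked example with assumed values
 * {40, 32, 24, 16}: avail_desc = 28 falls between the VI and VO
 * thresholds, so the pool moves to FLOW_POOL_VI_PAUSED and the pause
 * callback is issued for the VI and then BE/BK netif queues through the
 * fallthrough chain.
 */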
637
638 /**
639 * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
640 * @soc: Handle to DP SoC structure
641  * @desc_pool_id: ID of the flow control pool
642 *
643 * Return: TX descriptor allocated or NULL
644 */
645 static inline struct dp_tx_desc_s *
646 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
647 {
648 struct dp_tx_desc_s *tx_desc = NULL;
649 struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
650 bool is_pause = false;
651 enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
652 enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
653 enum netif_reason_type reason;
654
655 if (qdf_likely(pool)) {
656 qdf_spin_lock_bh(&pool->flow_pool_lock);
657 if (qdf_likely(pool->avail_desc &&
658 pool->status != FLOW_POOL_INVALID &&
659 pool->status != FLOW_POOL_INACTIVE)) {
660 tx_desc = dp_tx_get_desc_flow_pool(pool);
661 tx_desc->pool_id = desc_pool_id;
662 tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
663 dp_tx_desc_set_magic(tx_desc,
664 DP_TX_MAGIC_PATTERN_INUSE);
665 is_pause = dp_tx_is_threshold_reached(pool,
666 pool->avail_desc);
667
668 if (qdf_unlikely(pool->status ==
669 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
670 dp_tx_adjust_flow_pool_state(soc, pool);
671 is_pause = false;
672 }
673
674 if (qdf_unlikely(is_pause)) {
675 switch (pool->status) {
676 case FLOW_POOL_ACTIVE_UNPAUSED:
677 /* pause network BE/BK queues */
678 act = WLAN_NETIF_BE_BK_QUEUE_OFF;
679 reason = WLAN_DATA_FLOW_CTRL_BE_BK;
680 level = DP_TH_BE_BK;
681 pool->status = FLOW_POOL_BE_BK_PAUSED;
682 break;
683 case FLOW_POOL_BE_BK_PAUSED:
684 /* pause network VI queue */
685 act = WLAN_NETIF_VI_QUEUE_OFF;
686 reason = WLAN_DATA_FLOW_CTRL_VI;
687 level = DP_TH_VI;
688 pool->status = FLOW_POOL_VI_PAUSED;
689 break;
690 case FLOW_POOL_VI_PAUSED:
691 /* pause network VO queue */
692 act = WLAN_NETIF_VO_QUEUE_OFF;
693 reason = WLAN_DATA_FLOW_CTRL_VO;
694 level = DP_TH_VO;
695 pool->status = FLOW_POOL_VO_PAUSED;
696 break;
697 case FLOW_POOL_VO_PAUSED:
698 /* pause network HI PRI queue */
699 act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
700 reason = WLAN_DATA_FLOW_CTRL_PRI;
701 level = DP_TH_HI;
702 pool->status = FLOW_POOL_ACTIVE_PAUSED;
703 break;
704 case FLOW_POOL_ACTIVE_PAUSED:
705 act = WLAN_NETIF_ACTION_TYPE_NONE;
706 break;
707 default:
708 dp_err_rl("pool status is %d!",
709 pool->status);
710 break;
711 }
712
713 if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
714 pool->latest_pause_time[level] =
715 qdf_get_system_timestamp();
716 soc->pause_cb(desc_pool_id,
717 act,
718 reason);
719 }
720 }
721 } else {
722 pool->pkt_drop_no_desc++;
723 }
724 qdf_spin_unlock_bh(&pool->flow_pool_lock);
725 } else {
726 dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
727 soc->pool_stats.pkt_drop_no_pool++;
728 }
729
730 return tx_desc;
731 }
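/*
 * Hedged usage sketch for the flow-control allocation path above; the
 * surrounding transmit logic is hypothetical:
 *
 *	struct dp_tx_desc_s *tx_desc;
 *
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	if (!tx_desc)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	... fill the descriptor and hand it to the HW ring ...
 *
 *	on tx completion (or on enqueue failure):
 *	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
 */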
732
733 /**
734 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
735 * @soc: Handle to DP SoC structure
736 * @tx_desc: the tx descriptor to be freed
737 * @desc_pool_id: ID of the flow control pool
738 *
739 * Return: None
740 */
741 static inline void
742 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
743 uint8_t desc_pool_id)
744 {
745 struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
746 qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
747 enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
748 enum netif_reason_type reason;
749
750 qdf_spin_lock_bh(&pool->flow_pool_lock);
751 tx_desc->vdev_id = DP_INVALID_VDEV_ID;
752 tx_desc->nbuf = NULL;
753 tx_desc->flags = 0;
754 dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
755 dp_tx_put_desc_flow_pool(pool, tx_desc);
756 switch (pool->status) {
757 case FLOW_POOL_ACTIVE_PAUSED:
758 if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
759 act = WLAN_NETIF_PRIORITY_QUEUE_ON;
760 reason = WLAN_DATA_FLOW_CTRL_PRI;
761 pool->status = FLOW_POOL_VO_PAUSED;
762
763 /* Update maximum pause duration for HI queue */
764 pause_dur = unpause_time -
765 pool->latest_pause_time[DP_TH_HI];
766 if (pool->max_pause_time[DP_TH_HI] < pause_dur)
767 pool->max_pause_time[DP_TH_HI] = pause_dur;
768 }
769 break;
770 case FLOW_POOL_VO_PAUSED:
771 if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
772 act = WLAN_NETIF_VO_QUEUE_ON;
773 reason = WLAN_DATA_FLOW_CTRL_VO;
774 pool->status = FLOW_POOL_VI_PAUSED;
775
776 /* Update maximum pause duration for VO queue */
777 pause_dur = unpause_time -
778 pool->latest_pause_time[DP_TH_VO];
779 if (pool->max_pause_time[DP_TH_VO] < pause_dur)
780 pool->max_pause_time[DP_TH_VO] = pause_dur;
781 }
782 break;
783 case FLOW_POOL_VI_PAUSED:
784 if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
785 act = WLAN_NETIF_VI_QUEUE_ON;
786 reason = WLAN_DATA_FLOW_CTRL_VI;
787 pool->status = FLOW_POOL_BE_BK_PAUSED;
788
789 /* Update maximum pause duration for VI queue */
790 pause_dur = unpause_time -
791 pool->latest_pause_time[DP_TH_VI];
792 if (pool->max_pause_time[DP_TH_VI] < pause_dur)
793 pool->max_pause_time[DP_TH_VI] = pause_dur;
794 }
795 break;
796 case FLOW_POOL_BE_BK_PAUSED:
797 if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
798 act = WLAN_NETIF_BE_BK_QUEUE_ON;
799 reason = WLAN_DATA_FLOW_CTRL_BE_BK;
800 pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
801
802 /* Update maximum pause duration for BE_BK queue */
803 pause_dur = unpause_time -
804 pool->latest_pause_time[DP_TH_BE_BK];
805 if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
806 pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
807 }
808 break;
809 case FLOW_POOL_INVALID:
810 if (pool->avail_desc == pool->pool_size) {
811 dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
812 dp_tx_desc_pool_free(soc, desc_pool_id, false);
813 qdf_spin_unlock_bh(&pool->flow_pool_lock);
814 dp_err_rl("pool %d is freed!!", desc_pool_id);
815 return;
816 }
817 break;
818
819 case FLOW_POOL_ACTIVE_UNPAUSED:
820 break;
821
822 case FLOW_POOL_ACTIVE_UNPAUSED_REATTACH:
823 fallthrough;
824 default:
825 dp_err_rl("pool %d status: %d",
826 desc_pool_id, pool->status);
827 break;
828 };
829
830 if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
831 soc->pause_cb(pool->flow_pool_id,
832 act, reason);
833 qdf_spin_unlock_bh(&pool->flow_pool_lock);
834 }
835
836 static inline void
837 dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
838 uint8_t desc_pool_id)
839 {
840 }
841
842 static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
843 uint8_t desc_pool_id)
844 {
845 return NULL;
846 }
847 #else /* QCA_AC_BASED_FLOW_CONTROL */
848
849 static inline bool
850 dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
851 {
852 if (qdf_unlikely(avail_desc < pool->stop_th))
853 return true;
854 else
855 return false;
856 }
857
858 /**
859 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
860 * @soc: Handle to DP SoC structure
861  * @desc_pool_id: ID of the flow control pool
862 *
863 * Return: Tx descriptor or NULL
864 */
865 static inline struct dp_tx_desc_s *
866 dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
867 {
868 struct dp_tx_desc_s *tx_desc = NULL;
869 struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
870
871 if (pool) {
872 qdf_spin_lock_bh(&pool->flow_pool_lock);
873 if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
874 pool->avail_desc) {
875 tx_desc = dp_tx_get_desc_flow_pool(pool);
876 tx_desc->pool_id = desc_pool_id;
877 tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
878 dp_tx_desc_set_magic(tx_desc,
879 DP_TX_MAGIC_PATTERN_INUSE);
880 if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
881 pool->status = FLOW_POOL_ACTIVE_PAUSED;
882 qdf_spin_unlock_bh(&pool->flow_pool_lock);
883 /* pause network queues */
884 soc->pause_cb(desc_pool_id,
885 WLAN_STOP_ALL_NETIF_QUEUE,
886 WLAN_DATA_FLOW_CONTROL);
887 } else {
888 qdf_spin_unlock_bh(&pool->flow_pool_lock);
889 }
890 } else {
891 pool->pkt_drop_no_desc++;
892 qdf_spin_unlock_bh(&pool->flow_pool_lock);
893 }
894 } else {
895 soc->pool_stats.pkt_drop_no_pool++;
896 }
897
898 return tx_desc;
899 }
900
901 static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
902 uint8_t desc_pool_id)
903 {
904 return NULL;
905 }
906 /**
907 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
908 * @soc: Handle to DP SoC structure
909 * @tx_desc: Descriptor to free
910 * @desc_pool_id: Descriptor pool Id
911 *
912 * Return: None
913 */
914 static inline void
915 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
916 uint8_t desc_pool_id)
917 {
918 struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
919
920 qdf_spin_lock_bh(&pool->flow_pool_lock);
921 tx_desc->vdev_id = DP_INVALID_VDEV_ID;
922 tx_desc->nbuf = NULL;
923 tx_desc->flags = 0;
924 dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
925 dp_tx_put_desc_flow_pool(pool, tx_desc);
926 switch (pool->status) {
927 case FLOW_POOL_ACTIVE_PAUSED:
928 if (pool->avail_desc > pool->start_th) {
929 soc->pause_cb(pool->flow_pool_id,
930 WLAN_WAKE_ALL_NETIF_QUEUE,
931 WLAN_DATA_FLOW_CONTROL);
932 pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
933 }
934 break;
935 case FLOW_POOL_INVALID:
936 if (pool->avail_desc == pool->pool_size) {
937 dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
938 dp_tx_desc_pool_free(soc, desc_pool_id, false);
939 qdf_spin_unlock_bh(&pool->flow_pool_lock);
940 qdf_print("%s %d pool is freed!!",
941 __func__, __LINE__);
942 return;
943 }
944 break;
945
946 case FLOW_POOL_ACTIVE_UNPAUSED:
947 break;
948 default:
949 qdf_print("%s %d pool is INACTIVE State!!",
950 __func__, __LINE__);
951 break;
952 };
953
954 qdf_spin_unlock_bh(&pool->flow_pool_lock);
955 }
956
957 static inline void
958 dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
959 uint8_t desc_pool_id)
960 {
961 }
962 #endif /* QCA_AC_BASED_FLOW_CONTROL */
963
964 static inline bool
965 dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
966 {
967 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
968 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
969 DP_MOD_ID_CDP);
970 struct dp_tx_desc_pool_s *pool;
971 bool status;
972
973 if (!vdev)
974 return false;
975
976 pool = vdev->pool;
977 status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
978 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
979
980 return status;
981 }
982 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
983
984 static inline void dp_tx_flow_control_init(struct dp_soc *handle)
985 {
986 }
987
988 static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
989 {
990 }
991
992 static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
993 uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
994 uint32_t flow_pool_size)
995 {
996 return QDF_STATUS_SUCCESS;
997 }
998
999 static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
1000 uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
1001 {
1002 }
1003
1004 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
1005 static inline
1006 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
1007 {
1008 if (tx_desc)
1009 prefetch(tx_desc);
1010 }
1011 #else
1012 static inline
1013 void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
1014 {
1015 }
1016 #endif
1017
1018 /**
1019 * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
1020 * @soc: Handle to DP SoC structure
1021 * @desc_pool_id: pool id
1022 *
1023 * Return: Tx Descriptor or NULL
1024 */
1025 static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
1026 uint8_t desc_pool_id)
1027 {
1028 struct dp_tx_desc_s *tx_desc = NULL;
1029 struct dp_tx_desc_pool_s *pool = NULL;
1030
1031 pool = dp_get_tx_desc_pool(soc, desc_pool_id);
1032
1033 TX_DESC_LOCK_LOCK(&pool->lock);
1034
1035 tx_desc = pool->freelist;
1036
1037 /* Pool is exhausted */
1038 if (!tx_desc) {
1039 TX_DESC_LOCK_UNLOCK(&pool->lock);
1040 return NULL;
1041 }
1042
1043 pool->freelist = pool->freelist->next;
1044 pool->num_allocated++;
1045 pool->num_free--;
1046 dp_tx_prefetch_desc(pool->freelist);
1047
1048 tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
1049
1050 TX_DESC_LOCK_UNLOCK(&pool->lock);
1051
1052 return tx_desc;
1053 }
1054
1055 static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
1056 uint8_t desc_pool_id)
1057 {
1058 struct dp_tx_desc_s *tx_desc = NULL;
1059 struct dp_tx_desc_pool_s *pool = NULL;
1060
1061 pool = dp_get_spcl_tx_desc_pool(soc, desc_pool_id);
1062
1063 TX_DESC_LOCK_LOCK(&pool->lock);
1064
1065 tx_desc = pool->freelist;
1066
1067 /* Pool is exhausted */
1068 if (!tx_desc) {
1069 TX_DESC_LOCK_UNLOCK(&pool->lock);
1070 return NULL;
1071 }
1072
1073 pool->freelist = pool->freelist->next;
1074 pool->num_allocated++;
1075 pool->num_free--;
1076 dp_tx_prefetch_desc(pool->freelist);
1077
1078 tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
1079 tx_desc->flags |= DP_TX_DESC_FLAG_SPECIAL;
1080
1081 TX_DESC_LOCK_UNLOCK(&pool->lock);
1082
1083 return tx_desc;
1084 }
1085
1086 /**
1087 * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
1088 * from given pool
1089 * @soc: Handle to DP SoC structure
1090  * @desc_pool_id: pool id from which descriptors should be picked up
1091  * @num_requested: number of descriptors required
1092  *
1093  * Allocate multiple tx descriptors and link them into a list.
1094 *
1095 * Return: first descriptor pointer or NULL
1096 */
1097 static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
1098 struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
1099 {
1100 struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
1101 uint8_t count;
1102 struct dp_tx_desc_pool_s *pool = NULL;
1103
1104 pool = dp_get_tx_desc_pool(soc, desc_pool_id);
1105
1106 TX_DESC_LOCK_LOCK(&pool->lock);
1107
1108 if ((num_requested == 0) ||
1109 (pool->num_free < num_requested)) {
1110 TX_DESC_LOCK_UNLOCK(&pool->lock);
1111 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1112 "%s, No Free Desc: Available(%d) num_requested(%d)",
1113 __func__, pool->num_free,
1114 num_requested);
1115 return NULL;
1116 }
1117
1118 h_desc = pool->freelist;
1119
1120 /* h_desc should never be NULL since num_free > requested */
1121 qdf_assert_always(h_desc);
1122
1123 c_desc = h_desc;
1124 for (count = 0; count < (num_requested - 1); count++) {
1125 c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
1126 c_desc = c_desc->next;
1127 }
1128 pool->num_free -= count;
1129 pool->num_allocated += count;
1130 pool->freelist = c_desc->next;
1131 c_desc->next = NULL;
1132
1133 TX_DESC_LOCK_UNLOCK(&pool->lock);
1134 return h_desc;
1135 }
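/*
 * Illustrative sketch of consuming the list returned by
 * dp_tx_desc_alloc_multiple(); the loop variables are hypothetical:
 *
 *	struct dp_tx_desc_s *head, *cur;
 *
 *	head = dp_tx_desc_alloc_multiple(soc, desc_pool_id, num_requested);
 *	if (!head)
 *		return QDF_STATUS_E_RESOURCES;
 *
 *	for (cur = head; cur; cur = cur->next) {
 *		... program one segment into cur ...
 *	}
 */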
1136
1137 /**
1138 * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
1139 * @soc: Handle to DP SoC structure
1140 * @tx_desc: descriptor to free
1141 * @desc_pool_id: ID of the free pool
1142 */
1143 static inline void
1144 dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
1145 uint8_t desc_pool_id)
1146 {
1147 struct dp_tx_desc_pool_s *pool = NULL;
1148
1149 dp_tx_desc_clear(tx_desc);
1150 pool = dp_get_tx_desc_pool(soc, desc_pool_id);
1151 TX_DESC_LOCK_LOCK(&pool->lock);
1152 tx_desc->next = pool->freelist;
1153 pool->freelist = tx_desc;
1154 pool->num_allocated--;
1155 pool->num_free++;
1156 TX_DESC_LOCK_UNLOCK(&pool->lock);
1157 }
1158
1159 static inline void
1160 dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
1161 uint8_t desc_pool_id)
1162 {
1163 struct dp_tx_desc_pool_s *pool = NULL;
1164
1165 dp_tx_desc_clear(tx_desc);
1166
1167 pool = dp_get_spcl_tx_desc_pool(soc, desc_pool_id);
1168 TX_DESC_LOCK_LOCK(&pool->lock);
1169 tx_desc->next = pool->freelist;
1170 pool->freelist = tx_desc;
1171 pool->num_allocated--;
1172 pool->num_free++;
1173 TX_DESC_LOCK_UNLOCK(&pool->lock);
1174 }
1175
1176 static inline void
1177 dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
1178 struct dp_tx_desc_s *head_desc,
1179 struct dp_tx_desc_s *tail_desc,
1180 uint32_t fast_desc_count)
1181 {
1182 TX_DESC_LOCK_LOCK(&pool->lock);
1183 pool->num_allocated -= fast_desc_count;
1184 pool->num_free += fast_desc_count;
1185 tail_desc->next = pool->freelist;
1186 pool->freelist = head_desc;
1187 TX_DESC_LOCK_UNLOCK(&pool->lock);
1188 }
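/*
 * Sketch of how a completion path could batch descriptors for
 * dp_tx_desc_free_list(): chain them locally and return the whole chain
 * under a single lock acquisition. Variable names are hypothetical.
 *
 *	struct dp_tx_desc_s *head = NULL, *tail = NULL;
 *	uint32_t count = 0;
 *
 *	for each completed descriptor 'desc':
 *		dp_tx_desc_clear(desc);
 *		if (!head)
 *			head = desc;
 *		else
 *			tail->next = desc;
 *		tail = desc;
 *		count++;
 *
 *	if (count)
 *		dp_tx_desc_free_list(pool, head, tail, count);
 */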
1189
1190 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1191
1192 #ifdef QCA_DP_TX_DESC_ID_CHECK
1193 /**
1194 * dp_tx_is_desc_id_valid() - check is the tx desc id valid
1195 * @soc: Handle to DP SoC structure
1196  * @tx_desc_id: tx descriptor ID to validate
1197 *
1198 * Return: true or false
1199 */
1200 static inline bool
1201 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
1202 {
1203 uint8_t pool_id;
1204 uint16_t page_id, offset;
1205 struct dp_tx_desc_pool_s *pool;
1206
1207 pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
1208 DP_TX_DESC_ID_POOL_OS;
1209 /* Pool ID is out of limit */
1210 if (pool_id > wlan_cfg_get_num_tx_desc_pool(
1211 soc->wlan_cfg_ctx)) {
1212 QDF_TRACE(QDF_MODULE_ID_DP,
1213 QDF_TRACE_LEVEL_FATAL,
1214 "%s:Tx Comp pool id %d not valid",
1215 __func__,
1216 pool_id);
1217 goto warn_exit;
1218 }
1219
1220 pool = &soc->tx_desc[pool_id];
1221 /* the pool is freed */
1222 if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
1223 QDF_TRACE(QDF_MODULE_ID_DP,
1224 QDF_TRACE_LEVEL_FATAL,
1225 "%s:the pool %d has been freed",
1226 __func__,
1227 pool_id);
1228 goto warn_exit;
1229 }
1230
1231 page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
1232 DP_TX_DESC_ID_PAGE_OS;
1233 /* the page id is out of limit */
1234 if (page_id >= pool->desc_pages.num_pages) {
1235 QDF_TRACE(QDF_MODULE_ID_DP,
1236 QDF_TRACE_LEVEL_FATAL,
1237 "%s:the page id %d invalid, pool id %d, num_page %d",
1238 __func__,
1239 page_id,
1240 pool_id,
1241 pool->desc_pages.num_pages);
1242 goto warn_exit;
1243 }
1244
1245 offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
1246 DP_TX_DESC_ID_OFFSET_OS;
1247 /* the offset is out of limit */
1248 if (offset >= pool->desc_pages.num_element_per_page) {
1249 QDF_TRACE(QDF_MODULE_ID_DP,
1250 QDF_TRACE_LEVEL_FATAL,
1251 "%s:offset %d invalid, pool%d,num_elem_per_page %d",
1252 __func__,
1253 offset,
1254 pool_id,
1255 pool->desc_pages.num_element_per_page);
1256 goto warn_exit;
1257 }
1258
1259 return true;
1260
1261 warn_exit:
1262 QDF_TRACE(QDF_MODULE_ID_DP,
1263 QDF_TRACE_LEVEL_FATAL,
1264 "%s:Tx desc id 0x%x not valid",
1265 __func__,
1266 tx_desc_id);
1267 qdf_assert_always(0);
1268 return false;
1269 }
1270
1271 #else
1272 static inline bool
1273 dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
1274 {
1275 return true;
1276 }
1277 #endif /* QCA_DP_TX_DESC_ID_CHECK */
1278
1279 #ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
1280 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
1281 struct dp_tx_desc_s *desc,
1282 uint8_t allow_fast_comp)
1283 {
1284 if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)) &&
1285 qdf_likely(allow_fast_comp))
1286 desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
1287
1288 if (qdf_likely(desc->nbuf->is_from_recycler) &&
1289 qdf_likely(desc->nbuf->fast_xmit))
1290 desc->flags |= DP_TX_DESC_FLAG_FAST;
1291 }
1292
1293 #else
1294 static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
1295 struct dp_tx_desc_s *desc,
1296 uint8_t allow_fast_comp)
1297 {
1298 }
1299 #endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
1300
1301 /**
1302 * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
1303 * @soc: handle for the device sending the data
1304 * @pool_id: pool id
1305 * @page_id: page id
1306 * @offset: offset from base address
1307 * @spcl_pool: bit to indicate if this is a special pool
1308 *
1309 * Use page and offset to find the corresponding descriptor object in
1310 * the given descriptor pool.
1311 *
1312 * Return: the descriptor object that has the specified ID
1313 */
1314 static inline
1315 struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
1316 uint8_t pool_id, uint16_t page_id,
1317 uint16_t offset, bool spcl_pool)
1318 {
1319 struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
1320
1321 tx_desc_pool = spcl_pool ? dp_get_spcl_tx_desc_pool(soc, pool_id) :
1322 dp_get_tx_desc_pool(soc, pool_id);
1323
1324 return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
1325 tx_desc_pool->elem_size * offset;
1326 }
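/*
 * Sketch tying dp_tx_desc_find() back to the cookie layout at the top of
 * this file: decode pool, page and offset from a descriptor ID and look up
 * the SW descriptor, assuming a regular (non-special) pool. The desc_id
 * local is hypothetical.
 *
 *	uint8_t pool_id = (desc_id & DP_TX_DESC_ID_POOL_MASK) >>
 *				DP_TX_DESC_ID_POOL_OS;
 *	uint16_t page_id = (desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
 *				DP_TX_DESC_ID_PAGE_OS;
 *	uint16_t offset = (desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
 *				DP_TX_DESC_ID_OFFSET_OS;
 *	struct dp_tx_desc_s *tx_desc =
 *		dp_tx_desc_find(soc, pool_id, page_id, offset, false);
 */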
1327
1328 /**
1329 * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
1330 * @soc: handle for the device sending the data
1331 * @desc_pool_id: target pool id
1332 *
1333  * Return: tx extension descriptor on success, NULL if the pool is exhausted
1334 */
1335 static inline
1336 struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
1337 uint8_t desc_pool_id)
1338 {
1339 struct dp_tx_ext_desc_elem_s *c_elem;
1340
1341 desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
1342 qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1343 if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
1344 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1345 return NULL;
1346 }
1347 c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
1348 soc->tx_ext_desc[desc_pool_id].freelist =
1349 soc->tx_ext_desc[desc_pool_id].freelist->next;
1350 soc->tx_ext_desc[desc_pool_id].num_free--;
1351 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1352 return c_elem;
1353 }
1354
1355 /**
1356 * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
1357 * @soc: handle for the device sending the data
1358  * @elem: ext descriptor pointer to be released
1359 * @desc_pool_id: target pool id
1360 *
1361 * Return: None
1362 */
1363 static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
1364 struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
1365 {
1366 desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
1367 qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1368 elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
1369 soc->tx_ext_desc[desc_pool_id].freelist = elem;
1370 soc->tx_ext_desc[desc_pool_id].num_free++;
1371 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1372 return;
1373 }
1374
1375 /**
1376 * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptor and
1377 * attach it to free list
1378 * @soc: Handle to DP SoC structure
1379  * @desc_pool_id: pool id to which the descriptors should be returned
1380  * @elem: head of the tx extension descriptor list to be freed
1381  * @num_free: number of descriptors to be freed
1382 *
1383 * Return: none
1384 */
1385 static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
1386 struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
1387 uint8_t num_free)
1388 {
1389 struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
1390 uint8_t freed = num_free;
1391
1392 /* caller should always guarantee at least a list of num_free nodes */
1393 qdf_assert_always(elem);
1394
1395 head = elem;
1396 c_elem = head;
1397 tail = head;
1398 while (c_elem && freed) {
1399 tail = c_elem;
1400 c_elem = c_elem->next;
1401 freed--;
1402 }
1403
1404 /* caller should always guarantee at least a list of num_free nodes */
1405 qdf_assert_always(tail);
1406
1407 desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
1408 qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1409 tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
1410 soc->tx_ext_desc[desc_pool_id].freelist = head;
1411 soc->tx_ext_desc[desc_pool_id].num_free += num_free;
1412 qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1413
1414 return;
1415 }
1416
1417 #if defined(FEATURE_TSO)
1418 /**
1419 * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
1420 * @soc: device soc instance
1421  * @pool_id: pool id from which the tso descriptor should be allocated
1422 *
1423 * Allocates a TSO segment element from the free list held in
1424 * the soc
1425 *
1426 * Return: tso_seg, tso segment memory pointer
1427 */
1428 static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
1429 struct dp_soc *soc, uint8_t pool_id)
1430 {
1431 struct qdf_tso_seg_elem_t *tso_seg = NULL;
1432
1433 qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1434 if (soc->tx_tso_desc[pool_id].freelist) {
1435 soc->tx_tso_desc[pool_id].num_free--;
1436 tso_seg = soc->tx_tso_desc[pool_id].freelist;
1437 soc->tx_tso_desc[pool_id].freelist =
1438 soc->tx_tso_desc[pool_id].freelist->next;
1439 }
1440 qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1441
1442 return tso_seg;
1443 }
1444
1445 /**
1446 * dp_tx_tso_desc_free() - function to free a TSO segment
1447 * @soc: device soc instance
1448  * @pool_id: pool id to which the tso descriptor should be returned
1449 * @tso_seg: tso segment memory pointer
1450 *
1451  * Returns a TSO segment element to the free list held in the
1452  * soc
1453 *
1454 * Return: none
1455 */
1456 static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
1457 uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
1458 {
1459 qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1460 tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
1461 soc->tx_tso_desc[pool_id].freelist = tso_seg;
1462 soc->tx_tso_desc[pool_id].num_free++;
1463 qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1464 }
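/*
 * Hedged usage sketch for the TSO segment pool helpers above; the
 * surrounding TSO preparation code is hypothetical:
 *
 *	struct qdf_tso_seg_elem_t *tso_seg;
 *
 *	tso_seg = dp_tx_tso_desc_alloc(soc, pool_id);
 *	if (!tso_seg)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... fill the segment info for this MSDU ...
 *
 *	once tx completion for the segment is processed:
 *	dp_tx_tso_desc_free(soc, pool_id, tso_seg);
 */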
1465
1466 static inline
1467 struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
1468 uint8_t pool_id)
1469 {
1470 struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
1471
1472 qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1473 if (soc->tx_tso_num_seg[pool_id].freelist) {
1474 soc->tx_tso_num_seg[pool_id].num_free--;
1475 tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
1476 soc->tx_tso_num_seg[pool_id].freelist =
1477 soc->tx_tso_num_seg[pool_id].freelist->next;
1478 }
1479 qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1480
1481 return tso_num_seg;
1482 }
1483
1484 static inline
1485 void dp_tso_num_seg_free(struct dp_soc *soc,
1486 uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
1487 {
1488 qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1489 tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
1490 soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
1491 soc->tx_tso_num_seg[pool_id].num_free++;
1492 qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1493 }
1494 #endif
1495
1496 /**
1497 * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
1498 * @pdev: DP_PDEV handle for datapath
1499 *
1500  * Return: ME buffer on success, NULL on error
1501 */
1502 static inline struct dp_tx_me_buf_t*
1503 dp_tx_me_alloc_buf(struct dp_pdev *pdev)
1504 {
1505 struct dp_tx_me_buf_t *buf = NULL;
1506 qdf_spin_lock_bh(&pdev->tx_mutex);
1507 if (pdev->me_buf.freelist) {
1508 buf = pdev->me_buf.freelist;
1509 pdev->me_buf.freelist = pdev->me_buf.freelist->next;
1510 pdev->me_buf.buf_in_use++;
1511 } else {
1512 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1513 "Error allocating memory in pool");
1514 qdf_spin_unlock_bh(&pdev->tx_mutex);
1515 return NULL;
1516 }
1517 qdf_spin_unlock_bh(&pdev->tx_mutex);
1518 return buf;
1519 }
1520
1521 /**
1522 * dp_tx_me_free_buf() - Unmap the buffer holding the dest
1523  * address, free the me descriptor and add it to the free-pool
1524 * @pdev: DP_PDEV handle for datapath
1525 * @buf : Allocated ME BUF
1526 *
1527 * Return:void
1528 */
1529 static inline void
1530 dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
1531 {
1532 /*
1533 * If the buf containing mac address was mapped,
1534 * it must be unmapped before freeing the me_buf.
1535 * The "paddr_macbuf" member in the me_buf structure
1536 * holds the mapped physical address and it must be
1537 * set to 0 after unmapping.
1538 */
1539 if (buf->paddr_macbuf) {
1540 qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
1541 buf->paddr_macbuf,
1542 QDF_DMA_TO_DEVICE,
1543 QDF_MAC_ADDR_SIZE);
1544 buf->paddr_macbuf = 0;
1545 }
1546 qdf_spin_lock_bh(&pdev->tx_mutex);
1547 buf->next = pdev->me_buf.freelist;
1548 pdev->me_buf.freelist = buf;
1549 pdev->me_buf.buf_in_use--;
1550 qdf_spin_unlock_bh(&pdev->tx_mutex);
1551 }
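/*
 * Sketch of the expected pairing for the ME buffer helpers above: the
 * destination MAC address buffer is DMA-mapped after allocation, and
 * dp_tx_me_free_buf() unmaps it before returning the element to the
 * freelist. The copy/map step shown here is an assumption for
 * illustration only.
 *
 *	struct dp_tx_me_buf_t *me_buf;
 *
 *	me_buf = dp_tx_me_alloc_buf(pdev);
 *	if (!me_buf)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... copy the destination MAC into the buffer, map it for DMA and
 *	    record the mapped address in me_buf->paddr_macbuf ...
 *
 *	when the frame transmission is done:
 *	dp_tx_me_free_buf(pdev, me_buf);
 */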
1552 #endif /* DP_TX_DESC_H */
1553