/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */
#include "dp_types.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"

#include <cdp_txrx_handle.h>
#include "dp_internal.h"
#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
#define GLOBAL_FLOW_POOL_STATS_LEN 25
#define FLOW_POOL_LOG_LEN 50
#ifdef QCA_AC_BASED_FLOW_CONTROL
/**
 * dp_tx_initialize_threshold() - Initialize flow pool thresholds
 * @pool: flow_pool
 * @start_threshold: start threshold of certain AC
 * @stop_threshold: stop threshold of certain AC
 * @flow_pool_size: flow pool size
 *
 * Return: none
 */
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* BE_BK threshold is same as previous threshold */
	pool->start_th[DP_TH_BE_BK] = (start_threshold
					* flow_pool_size) / 100;
	pool->stop_th[DP_TH_BE_BK] = (stop_threshold
					* flow_pool_size) / 100;

	/* Update VI threshold based on BE_BK threshold */
	pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VI_PERCENTAGE) / 100;

	/* Update VO threshold based on BE_BK threshold */
	pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_VO_PERCENTAGE) / 100;

	/* Update High Priority threshold based on BE_BK threshold */
	pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;
	pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
					* FL_TH_HI_PERCENTAGE) / 100;

	dp_debug("tx flow control threshold is set, pool size is %d",
		 flow_pool_size);
}
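/*
 * Worked example (illustrative numbers only, not the INI defaults):
 * with stop_threshold = 5, start_threshold = 15 (i.e. stop + offset, both
 * in percent) and flow_pool_size = 1024 descriptors, the code above yields
 *
 *	start_th[DP_TH_BE_BK] = (15 * 1024) / 100 = 153
 *	stop_th[DP_TH_BE_BK]  = (5 * 1024) / 100  = 51
 *
 * and the VI/VO/HI levels are then scaled down from the BE_BK values by
 * FL_TH_VI_PERCENTAGE, FL_TH_VO_PERCENTAGE and FL_TH_HI_PERCENTAGE
 * respectively.
 */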

/**
 * dp_tx_flow_pool_reattach() - Reattach flow_pool
 * @pool: flow_pool
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);

	pool->status = FLOW_POOL_ACTIVE_UNPAUSED_REATTACH;
	pool->pool_create_cnt++;
}
97
98 /**
99 * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
100 * @pool: flow_pool
101 *
102 * Return: none
103 */
104 static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s * pool)105 dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
106 {
107 int i;
108
109 for (i = 0; i < FL_TH_MAX; i++) {
110 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
111 "Level %d :: Start threshold %d :: Stop threshold %d",
112 i, pool->start_th[i], pool->stop_th[i]);
113 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
114 "Level %d :: Maximum pause time %lu ms",
115 i, pool->max_pause_time[i]);
116 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
117 "Level %d :: Latest pause timestamp %lu",
118 i, pool->latest_pause_time[i]);
119 }
120 }

/**
 * dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
 * @soc: dp soc
 * @pool: flow pool
 * @pool_status: flow pool status
 *
 * Return: none
 */
static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
				struct dp_tx_desc_pool_s *pool,
				enum flow_pool_status pool_status)
{
	switch (pool_status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_PRI);
		fallthrough;

	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_VO);
		fallthrough;

	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_VI);
		fallthrough;

	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		fallthrough;
	default:
		break;
	}
}
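/*
 * Note on the fallthrough chain above: the pause states are ordered from
 * the deepest pause level to the shallowest, so resuming from e.g.
 * FLOW_POOL_VO_PAUSED also re-enables the VI and BE_BK netif queues;
 * every subqueue that the deeper pause levels had stopped is turned back
 * on in one pass.
 */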

#else
static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
			   uint32_t start_threshold,
			   uint32_t stop_threshold,
			   uint16_t flow_pool_size)
{
	/* INI is in percentage so divide by 100 */
	pool->start_th = (start_threshold * flow_pool_size) / 100;
	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
}

static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: flow pool already allocated, attached %d times",
		  __func__, pool->pool_create_cnt);
	if (pool->avail_desc > pool->start_th)
		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	else
		pool->status = FLOW_POOL_ACTIVE_PAUSED;

	pool->pool_create_cnt++;
}

static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Start threshold %d :: Stop threshold %d",
		  pool->start_th, pool->stop_th);
}

static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
				struct dp_tx_desc_pool_s *pool,
				enum flow_pool_status pool_status)
{
}

#endif

void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	struct dp_tx_desc_pool_s tmp_pool;
	int i;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool map received %d", pool_stats->pool_map_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "No of pool unmap received %d", pool_stats->pool_unmap_count);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Pkt dropped due to unavailability of pool %d",
		  pool_stats->pkt_drop_no_pool);

	/*
	 * Nested spin lock.
	 * Always take in below order.
	 * flow_pool_array_lock -> flow_pool_lock
	 */
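	/*
	 * Each active pool is snapshotted into tmp_pool under its own
	 * flow_pool_lock, and both locks are dropped before the snapshot is
	 * logged, so no spinlock is held across the QDF_TRACE calls; the
	 * array lock is re-taken before moving on to the next pool.
	 */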
	qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		qdf_spin_lock_bh(&pool->flow_pool_lock);
		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Flow_pool_id %d :: status %d",
			  tmp_pool.flow_pool_id, tmp_pool.status);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Total %d :: Available %d",
			  tmp_pool.pool_size, tmp_pool.avail_desc);
		dp_tx_flow_pool_dump_threshold(&tmp_pool);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Member flow_id %d :: flow_type %d",
			  tmp_pool.flow_pool_id, tmp_pool.flow_type);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Pkt dropped due to unavailability of descriptors %d",
			  tmp_pool.pkt_drop_no_desc);
		qdf_spin_lock_bh(&soc->flow_pool_array_lock);
	}
	qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
}

void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
{
	struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
	struct dp_tx_desc_pool_s *pool = NULL;
	char *comb_log_str;
	uint32_t comb_log_str_size;
	int bytes_written = 0;
	int i;

	comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
				(FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
	comb_log_str = qdf_mem_malloc(comb_log_str_size);
	if (!comb_log_str)
		return;

	bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
				     comb_log_str_size, "G:(%d,%d,%d) ",
				     pool_stats->pool_map_count,
				     pool_stats->pool_unmap_count,
				     pool_stats->pkt_drop_no_pool);

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		pool = &soc->tx_desc[i];
		if (pool->status > FLOW_POOL_INVALID)
			continue;
		bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
				(bytes_written >= comb_log_str_size) ? 0 :
				comb_log_str_size - bytes_written,
				"| %d %d: (%d,%d,%d)",
				pool->flow_pool_id, pool->status,
				pool->pool_size, pool->avail_desc,
				pool->pkt_drop_no_desc);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "FLOW_POOL_STATS %s", comb_log_str);

	qdf_mem_free(comb_log_str);
}
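/*
 * Example of the resulting compact trace line (values are illustrative):
 *
 *	FLOW_POOL_STATS G:(1,0,0) | 0 2: (1024,980,0)
 *
 * i.e. the global (map count, unmap count, no-pool drops) tuple followed by
 * one "flow_pool_id status: (size, avail, no-desc drops)" tuple per
 * active pool.
 */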

/**
 * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
 * @soc: Handle to struct dp_soc.
 *
 * Return: None
 */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
{
	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: soc is null", __func__);
		return;
	}
	qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
}

/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size, false)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("dp_tx_desc_pool_alloc failed flow_pool_id: %d",
		       flow_pool_id);
		return NULL;
	}

	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size, false)) {
		dp_tx_desc_pool_free(soc, flow_pool_id, false);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("dp_tx_desc_pool_init failed flow_pool_id: %d",
		       flow_pool_id);
		return NULL;
	}

	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}

/**
 * dp_is_tx_flow_pool_delete_allowed() - Can flow pool be deleted
 * @soc: Handle to struct dp_soc
 * @vdev_id: vdev_id corresponding to flow pool
 *
 * Check if it is OK to go ahead and delete the flow pool. One such case is
 * MLO, where it is not OK to delete the flow pool when a link switch happens.
 *
 * Return: true if the flow pool can be deleted, false otherwise
 */
static bool dp_is_tx_flow_pool_delete_allowed(struct dp_soc *soc,
					      uint8_t vdev_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_vdev *vdev = NULL;
	bool is_allow = true;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC);

	/* only check for sta mode */
	if (!vdev || vdev->opmode != wlan_op_mode_sta)
		goto comp_ret;

	/*
	 * Deleting the current pool is disallowed only when the current vdev
	 * belongs to an MLO connection and is connected; for a legacy
	 * connection it is always allowed.
	 */
	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
					QDF_STATUS_SUCCESS) {
			if (peer->valid && !peer->sta_self_peer)
				is_allow = false;
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

comp_ret:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);

	return is_allow;
}

/**
 * dp_tx_delete_flow_pool() - delete flow pool
 * @soc: Handle to struct dp_soc
 * @pool: flow pool pointer
 * @force: free pool forcefully
 *
 * Delete flow_pool if all tx descriptors are available.
 * Otherwise put it in FLOW_POOL_INVALID state.
 * If force is set then pull all available descriptors to
 * global pool.
 *
 * Return: 0 for success or error
 */
int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
			   bool force)
{
	struct dp_vdev *vdev;
	enum flow_pool_status pool_status;

	if (!soc || !pool) {
		dp_err("pool or soc is NULL");
		QDF_ASSERT(0);
		return ENOMEM;
	}

	dp_info("pool_id %d create_cnt=%d, avail_desc=%d, size=%d, status=%d",
		pool->flow_pool_id, pool->pool_create_cnt, pool->avail_desc,
		pool->pool_size, pool->status);

	if (!dp_is_tx_flow_pool_delete_allowed(soc, pool->flow_pool_id)) {
		dp_info("skip pool id %d delete as it's not allowed",
			pool->flow_pool_id);
		return -EAGAIN;
	}

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if (!pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("flow pool either not created or already deleted");
		return -ENOENT;
	}
	pool->pool_create_cnt--;
	if (pool->pool_create_cnt) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("pool is still attached, pending detach %d",
		       pool->pool_create_cnt);
		return -EAGAIN;
	}

	if (pool->avail_desc < pool->pool_size) {
		pool_status = pool->status;
		pool->status = FLOW_POOL_INVALID;
		dp_tx_flow_ctrl_reset_subqueues(soc, pool, pool_status);

		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		/* Reset TX desc associated to this Vdev as NULL */
		vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
					     DP_MOD_ID_MISC);
		if (vdev) {
			dp_tx_desc_flush(vdev->pdev, vdev, false);
			dp_vdev_unref_delete(soc, vdev,
					     DP_MOD_ID_MISC);
		}
		dp_err("avail desc less than pool size");
		return -EAGAIN;
	}

	/* We have all the descriptors for the pool, we can delete the pool */
	dp_tx_desc_pool_deinit(soc, pool->flow_pool_id, false);
	dp_tx_desc_pool_free(soc, pool->flow_pool_id, false);
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	return 0;
}
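/*
 * Note: when descriptors are still outstanding, the pool is only marked
 * FLOW_POOL_INVALID above and -EAGAIN is returned; the remaining memory is
 * expected to be reclaimed later, on the descriptor-free path, once all
 * descriptors have been returned (that path lives outside this file).
 */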

/**
 * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
				     struct dp_tx_desc_pool_s *pool,
				     uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d", __func__, vdev_id);
		return;
	}

	vdev->pool = pool;
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	pool->pool_owner_ctx = soc;
	pool->flow_pool_id = vdev_id;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
 * @pdev: Handle to struct dp_pdev
 * @pool: flow_pool
 * @vdev_id: flow_id /vdev_id
 *
 * Return: none
 */
static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
				       struct dp_tx_desc_pool_s *pool,
				       uint8_t vdev_id)
{
	struct dp_vdev *vdev;
	struct dp_soc *soc = pdev->soc;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: invalid vdev_id %d", __func__, vdev_id);
		return;
	}

	vdev->pool = NULL;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}

/**
 * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 * @flow_pool_size: pool size
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
 */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
				       uint8_t flow_type, uint8_t flow_pool_id,
				       uint32_t flow_pool_size)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
		flow_id, flow_type, flow_pool_id, flow_pool_size);

	if (qdf_unlikely(!soc)) {
		dp_err("soc is NULL");
		return QDF_STATUS_E_FAULT;
	}
	soc->pool_stats.pool_map_count++;

	pool = dp_tx_create_flow_pool(soc, flow_pool_id,
				      flow_pool_size);
	if (!pool) {
		dp_err("creation of flow_pool %d size %d failed",
		       flow_pool_id, flow_pool_size);
		return QDF_STATUS_E_RESOURCES;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
		break;
	default:
		dp_err("flow type %d not supported", type);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
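/*
 * Typical lifecycle (illustrative): the target sends
 * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP when a flow (vdev) comes up, which is
 * routed here to create/attach the pool and map it to the vdev; the
 * matching HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP is handled by
 * dp_tx_flow_pool_unmap_handler() below, which unmaps the vdev and deletes
 * the pool once all descriptors are back.
 */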

/**
 * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
 * @pdev: Handle to struct dp_pdev
 * @flow_id: flow id
 * @flow_type: flow type
 * @flow_pool_id: pool id
 *
 * Process below target to host message
 * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
 *
 * Return: none
 */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
				   uint8_t flow_type, uint8_t flow_pool_id)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_pool_s *pool;
	enum htt_flow_type type = flow_type;

	dp_info("flow_id %d flow_type %d flow_pool_id %d", flow_id, flow_type,
		flow_pool_id);

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return;
	}
	soc->pool_stats.pool_unmap_count++;

	pool = &soc->tx_desc[flow_pool_id];
	dp_info("pool status: %d", pool->status);

	if (pool->status == FLOW_POOL_INACTIVE) {
		dp_err("flow pool id: %d is inactive, ignore unmap",
		       flow_pool_id);
		return;
	}

	switch (type) {
	case FLOW_TYPE_VDEV:
		dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: flow type %d not supported !!!",
			  __func__, type);
		return;
	}

	/* only delete if all descriptors are available */
	dp_tx_delete_flow_pool(soc, pool, false);
}

/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}

/**
 * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	int i;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &((soc)->tx_desc[i]);
		if (!tx_desc_pool->desc_pages.num_pages)
			continue;

		dp_tx_desc_pool_deinit(soc, i, false);
		dp_tx_desc_pool_free(soc, i, false);
	}
}

/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);

	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}

/**
 * dp_txrx_register_pause_cb() - Register pause callback
 * @handle: Handle to struct dp_soc
 * @pause_cb: Tx pause_cb
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on invalid input
 */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
				     tx_pause_callback pause_cb)
{
	struct dp_soc *soc = (struct dp_soc *)handle;

	if (!soc || !pause_cb) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("soc or pause_cb is NULL"));
		return QDF_STATUS_E_INVAL;
	}
	soc->pause_cb = pause_cb;

	return QDF_STATUS_SUCCESS;
}
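/*
 * Usage sketch (illustrative only, not part of this file): the netif layer
 * registers its pause handler once at attach time, e.g.
 *
 *	dp_txrx_register_pause_cb(cdp_soc_handle, my_netif_pause_cb);
 *
 * where my_netif_pause_cb is a hypothetical handler that pauses or wakes the
 * OS netif queues for the given id. The callback takes the flow pool / vdev
 * id, a netif action and a reason, matching the soc->pause_cb() invocations
 * in dp_tx_flow_ctrl_reset_subqueues() above; the exact prototype is defined
 * by the cdp flow-control headers.
 */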

QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
			       uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
					   vdev_id, tx_ring_size);
}
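/*
 * Note: in this host-initiated path the vdev_id is passed as both the
 * flow_id and the flow_pool_id, so pools created through here are indexed
 * by vdev_id in soc->tx_desc[].
 */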

void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
					     FLOW_TYPE_VDEV, vdev_id);
}