/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
19*5113495bSYour Name
#include <cds_api.h>

/* OS abstraction libraries */
#include <qdf_nbuf.h>		/* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>		/* qdf_atomic_read, etc. */
#include <qdf_util.h>		/* qdf_unlikely */
#include "dp_types.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"

#include <cdp_txrx_handle.h>
#include "dp_internal.h"
/* Flow id value that does not map to any flow pool */
#define INVALID_FLOW_ID 0xFF
#define MAX_INVALID_BIN 3
/* Buffer sizing for the compact flow-pool stats log string */
#define GLOBAL_FLOW_POOL_STATS_LEN 25
#define FLOW_POOL_LOG_LEN 50
36*5113495bSYour Name
37*5113495bSYour Name #ifdef QCA_AC_BASED_FLOW_CONTROL
38*5113495bSYour Name /**
39*5113495bSYour Name * dp_tx_initialize_threshold() - Threshold of flow Pool initialization
40*5113495bSYour Name * @pool: flow_pool
41*5113495bSYour Name * @stop_threshold: stop threshold of certain AC
42*5113495bSYour Name * @start_threshold: start threshold of certain AC
43*5113495bSYour Name * @flow_pool_size: flow pool size
44*5113495bSYour Name *
45*5113495bSYour Name * Return: none
46*5113495bSYour Name */
47*5113495bSYour Name static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s * pool,uint32_t start_threshold,uint32_t stop_threshold,uint16_t flow_pool_size)48*5113495bSYour Name dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
49*5113495bSYour Name uint32_t start_threshold,
50*5113495bSYour Name uint32_t stop_threshold,
51*5113495bSYour Name uint16_t flow_pool_size)
52*5113495bSYour Name {
53*5113495bSYour Name /* BE_BK threshold is same as previous threahold */
54*5113495bSYour Name pool->start_th[DP_TH_BE_BK] = (start_threshold
55*5113495bSYour Name * flow_pool_size) / 100;
56*5113495bSYour Name pool->stop_th[DP_TH_BE_BK] = (stop_threshold
57*5113495bSYour Name * flow_pool_size) / 100;
58*5113495bSYour Name
59*5113495bSYour Name /* Update VI threshold based on BE_BK threshold */
60*5113495bSYour Name pool->start_th[DP_TH_VI] = (pool->start_th[DP_TH_BE_BK]
61*5113495bSYour Name * FL_TH_VI_PERCENTAGE) / 100;
62*5113495bSYour Name pool->stop_th[DP_TH_VI] = (pool->stop_th[DP_TH_BE_BK]
63*5113495bSYour Name * FL_TH_VI_PERCENTAGE) / 100;
64*5113495bSYour Name
65*5113495bSYour Name /* Update VO threshold based on BE_BK threshold */
66*5113495bSYour Name pool->start_th[DP_TH_VO] = (pool->start_th[DP_TH_BE_BK]
67*5113495bSYour Name * FL_TH_VO_PERCENTAGE) / 100;
68*5113495bSYour Name pool->stop_th[DP_TH_VO] = (pool->stop_th[DP_TH_BE_BK]
69*5113495bSYour Name * FL_TH_VO_PERCENTAGE) / 100;
70*5113495bSYour Name
71*5113495bSYour Name /* Update High Priority threshold based on BE_BK threshold */
72*5113495bSYour Name pool->start_th[DP_TH_HI] = (pool->start_th[DP_TH_BE_BK]
73*5113495bSYour Name * FL_TH_HI_PERCENTAGE) / 100;
74*5113495bSYour Name pool->stop_th[DP_TH_HI] = (pool->stop_th[DP_TH_BE_BK]
75*5113495bSYour Name * FL_TH_HI_PERCENTAGE) / 100;
76*5113495bSYour Name
77*5113495bSYour Name dp_debug("tx flow control threshold is set, pool size is %d",
78*5113495bSYour Name flow_pool_size);
79*5113495bSYour Name }
80*5113495bSYour Name
81*5113495bSYour Name /**
82*5113495bSYour Name * dp_tx_flow_pool_reattach() - Reattach flow_pool
83*5113495bSYour Name * @pool: flow_pool
84*5113495bSYour Name *
85*5113495bSYour Name * Return: none
86*5113495bSYour Name */
87*5113495bSYour Name static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s * pool)88*5113495bSYour Name dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
89*5113495bSYour Name {
90*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
91*5113495bSYour Name "%s: flow pool already allocated, attached %d times",
92*5113495bSYour Name __func__, pool->pool_create_cnt);
93*5113495bSYour Name
94*5113495bSYour Name pool->status = FLOW_POOL_ACTIVE_UNPAUSED_REATTACH;
95*5113495bSYour Name pool->pool_create_cnt++;
96*5113495bSYour Name }
97*5113495bSYour Name
98*5113495bSYour Name /**
99*5113495bSYour Name * dp_tx_flow_pool_dump_threshold() - Dump threshold of the flow_pool
100*5113495bSYour Name * @pool: flow_pool
101*5113495bSYour Name *
102*5113495bSYour Name * Return: none
103*5113495bSYour Name */
104*5113495bSYour Name static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s * pool)105*5113495bSYour Name dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
106*5113495bSYour Name {
107*5113495bSYour Name int i;
108*5113495bSYour Name
109*5113495bSYour Name for (i = 0; i < FL_TH_MAX; i++) {
110*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
111*5113495bSYour Name "Level %d :: Start threshold %d :: Stop threshold %d",
112*5113495bSYour Name i, pool->start_th[i], pool->stop_th[i]);
113*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
114*5113495bSYour Name "Level %d :: Maximum pause time %lu ms",
115*5113495bSYour Name i, pool->max_pause_time[i]);
116*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
117*5113495bSYour Name "Level %d :: Latest pause timestamp %lu",
118*5113495bSYour Name i, pool->latest_pause_time[i]);
119*5113495bSYour Name }
120*5113495bSYour Name }
121*5113495bSYour Name
/**
 * dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
 * @soc: dp soc
 * @pool: flow pool
 * @pool_status: flow pool status
 *
 * Turns back on every netif sub-queue implied paused by @pool_status.
 * The cases are ordered from the deepest pause level down and rely on
 * deliberate fallthrough: e.g. FLOW_POOL_VO_PAUSED re-enables the VO,
 * VI and BE_BK queues. Any other status falls into default and resets
 * nothing.
 *
 * Return: none
 */
static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
				struct dp_tx_desc_pool_s *pool,
				enum flow_pool_status pool_status)
{
	switch (pool_status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		/* Deepest level: priority queue is restored first */
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_PRIORITY_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_PRI);
		fallthrough;

	case FLOW_POOL_VO_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VO_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_VO);
		fallthrough;

	case FLOW_POOL_VI_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_VI_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_VI);
		fallthrough;

	case FLOW_POOL_BE_BK_PAUSED:
		soc->pause_cb(pool->flow_pool_id,
			      WLAN_NETIF_BE_BK_QUEUE_ON,
			      WLAN_DATA_FLOW_CTRL_BE_BK);
		fallthrough;
	default:
		break;
	}
}
163*5113495bSYour Name
164*5113495bSYour Name #else
165*5113495bSYour Name static inline void
dp_tx_initialize_threshold(struct dp_tx_desc_pool_s * pool,uint32_t start_threshold,uint32_t stop_threshold,uint16_t flow_pool_size)166*5113495bSYour Name dp_tx_initialize_threshold(struct dp_tx_desc_pool_s *pool,
167*5113495bSYour Name uint32_t start_threshold,
168*5113495bSYour Name uint32_t stop_threshold,
169*5113495bSYour Name uint16_t flow_pool_size)
170*5113495bSYour Name
171*5113495bSYour Name {
172*5113495bSYour Name /* INI is in percentage so divide by 100 */
173*5113495bSYour Name pool->start_th = (start_threshold * flow_pool_size) / 100;
174*5113495bSYour Name pool->stop_th = (stop_threshold * flow_pool_size) / 100;
175*5113495bSYour Name }
176*5113495bSYour Name
177*5113495bSYour Name static inline void
dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s * pool)178*5113495bSYour Name dp_tx_flow_pool_reattach(struct dp_tx_desc_pool_s *pool)
179*5113495bSYour Name {
180*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
181*5113495bSYour Name "%s: flow pool already allocated, attached %d times",
182*5113495bSYour Name __func__, pool->pool_create_cnt);
183*5113495bSYour Name if (pool->avail_desc > pool->start_th)
184*5113495bSYour Name pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
185*5113495bSYour Name else
186*5113495bSYour Name pool->status = FLOW_POOL_ACTIVE_PAUSED;
187*5113495bSYour Name
188*5113495bSYour Name pool->pool_create_cnt++;
189*5113495bSYour Name }
190*5113495bSYour Name
/**
 * dp_tx_flow_pool_dump_threshold() - Dump thresholds of the flow_pool
 * @pool: flow_pool
 *
 * Non-AC build: there is a single start/stop threshold pair per pool.
 *
 * Return: none
 */
static inline void
dp_tx_flow_pool_dump_threshold(struct dp_tx_desc_pool_s *pool)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Start threshold %d :: Stop threshold %d",
		  pool->start_th, pool->stop_th);
}
198*5113495bSYour Name
/**
 * dp_tx_flow_ctrl_reset_subqueues() - Reset subqueues to original state
 * @soc: dp soc
 * @pool: flow pool
 * @pool_status: flow pool status
 *
 * No-op stub: without QCA_AC_BASED_FLOW_CONTROL there are no per-AC
 * sub-queue pause levels to restore.
 *
 * Return: none
 */
static inline void
dp_tx_flow_ctrl_reset_subqueues(struct dp_soc *soc,
				struct dp_tx_desc_pool_s *pool,
				enum flow_pool_status pool_status)
{
}
205*5113495bSYour Name
206*5113495bSYour Name #endif
207*5113495bSYour Name
dp_tx_dump_flow_pool_info(struct cdp_soc_t * soc_hdl)208*5113495bSYour Name void dp_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
209*5113495bSYour Name {
210*5113495bSYour Name struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
211*5113495bSYour Name struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
212*5113495bSYour Name struct dp_tx_desc_pool_s *pool = NULL;
213*5113495bSYour Name struct dp_tx_desc_pool_s tmp_pool;
214*5113495bSYour Name int i;
215*5113495bSYour Name
216*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
217*5113495bSYour Name "No of pool map received %d", pool_stats->pool_map_count);
218*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
219*5113495bSYour Name "No of pool unmap received %d", pool_stats->pool_unmap_count);
220*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
221*5113495bSYour Name "Pkt dropped due to unavailablity of pool %d",
222*5113495bSYour Name pool_stats->pkt_drop_no_pool);
223*5113495bSYour Name
224*5113495bSYour Name /*
225*5113495bSYour Name * Nested spin lock.
226*5113495bSYour Name * Always take in below order.
227*5113495bSYour Name * flow_pool_array_lock -> flow_pool_lock
228*5113495bSYour Name */
229*5113495bSYour Name qdf_spin_lock_bh(&soc->flow_pool_array_lock);
230*5113495bSYour Name for (i = 0; i < MAX_TXDESC_POOLS; i++) {
231*5113495bSYour Name pool = &soc->tx_desc[i];
232*5113495bSYour Name if (pool->status > FLOW_POOL_INVALID)
233*5113495bSYour Name continue;
234*5113495bSYour Name qdf_spin_lock_bh(&pool->flow_pool_lock);
235*5113495bSYour Name qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
236*5113495bSYour Name qdf_spin_unlock_bh(&pool->flow_pool_lock);
237*5113495bSYour Name qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
238*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, "\n");
239*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
240*5113495bSYour Name "Flow_pool_id %d :: status %d",
241*5113495bSYour Name tmp_pool.flow_pool_id, tmp_pool.status);
242*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
243*5113495bSYour Name "Total %d :: Available %d",
244*5113495bSYour Name tmp_pool.pool_size, tmp_pool.avail_desc);
245*5113495bSYour Name dp_tx_flow_pool_dump_threshold(&tmp_pool);
246*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
247*5113495bSYour Name "Member flow_id %d :: flow_type %d",
248*5113495bSYour Name tmp_pool.flow_pool_id, tmp_pool.flow_type);
249*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
250*5113495bSYour Name "Pkt dropped due to unavailablity of descriptors %d",
251*5113495bSYour Name tmp_pool.pkt_drop_no_desc);
252*5113495bSYour Name qdf_spin_lock_bh(&soc->flow_pool_array_lock);
253*5113495bSYour Name }
254*5113495bSYour Name qdf_spin_unlock_bh(&soc->flow_pool_array_lock);
255*5113495bSYour Name }
256*5113495bSYour Name
dp_tx_dump_flow_pool_info_compact(struct dp_soc * soc)257*5113495bSYour Name void dp_tx_dump_flow_pool_info_compact(struct dp_soc *soc)
258*5113495bSYour Name {
259*5113495bSYour Name struct dp_txrx_pool_stats *pool_stats = &soc->pool_stats;
260*5113495bSYour Name struct dp_tx_desc_pool_s *pool = NULL;
261*5113495bSYour Name char *comb_log_str;
262*5113495bSYour Name uint32_t comb_log_str_size;
263*5113495bSYour Name int bytes_written = 0;
264*5113495bSYour Name int i;
265*5113495bSYour Name
266*5113495bSYour Name comb_log_str_size = GLOBAL_FLOW_POOL_STATS_LEN +
267*5113495bSYour Name (FLOW_POOL_LOG_LEN * MAX_TXDESC_POOLS) + 1;
268*5113495bSYour Name comb_log_str = qdf_mem_malloc(comb_log_str_size);
269*5113495bSYour Name if (!comb_log_str)
270*5113495bSYour Name return;
271*5113495bSYour Name
272*5113495bSYour Name bytes_written = qdf_snprintf(&comb_log_str[bytes_written],
273*5113495bSYour Name comb_log_str_size, "G:(%d,%d,%d) ",
274*5113495bSYour Name pool_stats->pool_map_count,
275*5113495bSYour Name pool_stats->pool_unmap_count,
276*5113495bSYour Name pool_stats->pkt_drop_no_pool);
277*5113495bSYour Name
278*5113495bSYour Name for (i = 0; i < MAX_TXDESC_POOLS; i++) {
279*5113495bSYour Name pool = &soc->tx_desc[i];
280*5113495bSYour Name if (pool->status > FLOW_POOL_INVALID)
281*5113495bSYour Name continue;
282*5113495bSYour Name bytes_written += qdf_snprintf(&comb_log_str[bytes_written],
283*5113495bSYour Name (bytes_written >= comb_log_str_size) ? 0 :
284*5113495bSYour Name comb_log_str_size - bytes_written,
285*5113495bSYour Name "| %d %d: (%d,%d,%d)",
286*5113495bSYour Name pool->flow_pool_id, pool->status,
287*5113495bSYour Name pool->pool_size, pool->avail_desc,
288*5113495bSYour Name pool->pkt_drop_no_desc);
289*5113495bSYour Name }
290*5113495bSYour Name
291*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
292*5113495bSYour Name "FLOW_POOL_STATS %s", comb_log_str);
293*5113495bSYour Name
294*5113495bSYour Name qdf_mem_free(comb_log_str);
295*5113495bSYour Name }
296*5113495bSYour Name
297*5113495bSYour Name /**
298*5113495bSYour Name * dp_tx_clear_flow_pool_stats() - clear flow pool statistics
299*5113495bSYour Name *
300*5113495bSYour Name * @soc: Handle to struct dp_soc.
301*5113495bSYour Name *
302*5113495bSYour Name * Return: None
303*5113495bSYour Name */
dp_tx_clear_flow_pool_stats(struct dp_soc * soc)304*5113495bSYour Name void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)
305*5113495bSYour Name {
306*5113495bSYour Name
307*5113495bSYour Name if (!soc) {
308*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
309*5113495bSYour Name "%s: soc is null", __func__);
310*5113495bSYour Name return;
311*5113495bSYour Name }
312*5113495bSYour Name qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
313*5113495bSYour Name }
314*5113495bSYour Name
/**
 * dp_tx_create_flow_pool() - create flow pool
 * @soc: Handle to struct dp_soc
 * @flow_pool_id: flow pool id
 * @flow_pool_size: flow pool size
 *
 * If the pool is already created, it is reattached (attach count bumped)
 * and returned as-is. Otherwise descriptors are allocated and initialized
 * and thresholds set from the INI percentages.
 *
 * Return: flow_pool pointer / NULL for error
 */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;
	uint32_t stop_threshold;
	uint32_t start_threshold;

	if (flow_pool_id >= MAX_TXDESC_POOLS) {
		dp_err("invalid flow_pool_id %d", flow_pool_id);
		return NULL;
	}
	pool = &soc->tx_desc[flow_pool_id];
	/* flow_pool_lock held across alloc/init to serialize with a
	 * concurrent create/delete of the same pool
	 */
	qdf_spin_lock_bh(&pool->flow_pool_lock);
	if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
		dp_tx_flow_pool_reattach(pool);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("cannot alloc desc, status=%d, create_cnt=%d",
		       pool->status, pool->pool_create_cnt);
		/* Reattach path: existing pool is still valid for caller */
		return pool;
	}

	if (dp_tx_desc_pool_alloc(soc, flow_pool_id, flow_pool_size, false)) {
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("dp_tx_desc_pool_alloc failed flow_pool_id: %d",
		       flow_pool_id);
		return NULL;
	}

	if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size, false)) {
		/* Undo the successful alloc above before bailing out */
		dp_tx_desc_pool_free(soc, flow_pool_id, false);
		qdf_spin_unlock_bh(&pool->flow_pool_lock);
		dp_err("dp_tx_desc_pool_init failed flow_pool_id: %d",
		       flow_pool_id);
		return NULL;
	}

	/* INI percentages: start threshold sits above stop by the offset */
	stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
	start_threshold = stop_threshold +
		wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);

	pool->flow_pool_id = flow_pool_id;
	pool->pool_size = flow_pool_size;
	pool->avail_desc = flow_pool_size;
	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
	dp_tx_initialize_threshold(pool, start_threshold, stop_threshold,
				   flow_pool_size);
	pool->pool_create_cnt++;

	qdf_spin_unlock_bh(&pool->flow_pool_lock);

	return pool;
}
375*5113495bSYour Name
/**
 * dp_is_tx_flow_pool_delete_allowed() - Can flow pool be deleted
 * @soc: Handle to struct dp_soc
 * @vdev_id: vdev_id corresponding to flow pool
 *
 * Check if it is OK to go ahead delete the flow pool. One of the case is
 * MLO where it is not OK to delete the flow pool when link switch happens.
 *
 * Return: true if deletion is allowed, false otherwise
 */
static bool dp_is_tx_flow_pool_delete_allowed(struct dp_soc *soc,
					      uint8_t vdev_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_vdev *vdev = NULL;
	bool is_allow = true;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MISC);

	/* only check for sta mode */
	if (!vdev || vdev->opmode != wlan_op_mode_sta)
		goto comp_ret;

	/*
	 * Only if current vdev is belong to MLO connection and connected,
	 * then it's not allowed to delete current pool, for legacy
	 * connection, allowed always.
	 */
	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* Take a ref so the peer cannot vanish while inspected */
		if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
		    QDF_STATUS_SUCCESS) {
			/* Any valid non-self peer means still connected */
			if (peer->valid && !peer->sta_self_peer)
				is_allow = false;
			dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

comp_ret:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MISC);

	return is_allow;
}
424*5113495bSYour Name
425*5113495bSYour Name /**
426*5113495bSYour Name * dp_tx_delete_flow_pool() - delete flow pool
427*5113495bSYour Name * @soc: Handle to struct dp_soc
428*5113495bSYour Name * @pool: flow pool pointer
429*5113495bSYour Name * @force: free pool forcefully
430*5113495bSYour Name *
431*5113495bSYour Name * Delete flow_pool if all tx descriptors are available.
432*5113495bSYour Name * Otherwise put it in FLOW_POOL_INVALID state.
433*5113495bSYour Name * If force is set then pull all available descriptors to
434*5113495bSYour Name * global pool.
435*5113495bSYour Name *
436*5113495bSYour Name * Return: 0 for success or error
437*5113495bSYour Name */
dp_tx_delete_flow_pool(struct dp_soc * soc,struct dp_tx_desc_pool_s * pool,bool force)438*5113495bSYour Name int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
439*5113495bSYour Name bool force)
440*5113495bSYour Name {
441*5113495bSYour Name struct dp_vdev *vdev;
442*5113495bSYour Name enum flow_pool_status pool_status;
443*5113495bSYour Name
444*5113495bSYour Name if (!soc || !pool) {
445*5113495bSYour Name dp_err("pool or soc is NULL");
446*5113495bSYour Name QDF_ASSERT(0);
447*5113495bSYour Name return ENOMEM;
448*5113495bSYour Name }
449*5113495bSYour Name
450*5113495bSYour Name dp_info("pool_id %d create_cnt=%d, avail_desc=%d, size=%d, status=%d",
451*5113495bSYour Name pool->flow_pool_id, pool->pool_create_cnt, pool->avail_desc,
452*5113495bSYour Name pool->pool_size, pool->status);
453*5113495bSYour Name
454*5113495bSYour Name if (!dp_is_tx_flow_pool_delete_allowed(soc, pool->flow_pool_id)) {
455*5113495bSYour Name dp_info("skip pool id %d delete as it's not allowed",
456*5113495bSYour Name pool->flow_pool_id);
457*5113495bSYour Name return -EAGAIN;
458*5113495bSYour Name }
459*5113495bSYour Name
460*5113495bSYour Name qdf_spin_lock_bh(&pool->flow_pool_lock);
461*5113495bSYour Name if (!pool->pool_create_cnt) {
462*5113495bSYour Name qdf_spin_unlock_bh(&pool->flow_pool_lock);
463*5113495bSYour Name dp_err("flow pool either not created or already deleted");
464*5113495bSYour Name return -ENOENT;
465*5113495bSYour Name }
466*5113495bSYour Name pool->pool_create_cnt--;
467*5113495bSYour Name if (pool->pool_create_cnt) {
468*5113495bSYour Name qdf_spin_unlock_bh(&pool->flow_pool_lock);
469*5113495bSYour Name dp_err("pool is still attached, pending detach %d",
470*5113495bSYour Name pool->pool_create_cnt);
471*5113495bSYour Name return -EAGAIN;
472*5113495bSYour Name }
473*5113495bSYour Name
474*5113495bSYour Name if (pool->avail_desc < pool->pool_size) {
475*5113495bSYour Name pool_status = pool->status;
476*5113495bSYour Name pool->status = FLOW_POOL_INVALID;
477*5113495bSYour Name dp_tx_flow_ctrl_reset_subqueues(soc, pool, pool_status);
478*5113495bSYour Name
479*5113495bSYour Name qdf_spin_unlock_bh(&pool->flow_pool_lock);
480*5113495bSYour Name /* Reset TX desc associated to this Vdev as NULL */
481*5113495bSYour Name vdev = dp_vdev_get_ref_by_id(soc, pool->flow_pool_id,
482*5113495bSYour Name DP_MOD_ID_MISC);
483*5113495bSYour Name if (vdev) {
484*5113495bSYour Name dp_tx_desc_flush(vdev->pdev, vdev, false);
485*5113495bSYour Name dp_vdev_unref_delete(soc, vdev,
486*5113495bSYour Name DP_MOD_ID_MISC);
487*5113495bSYour Name }
488*5113495bSYour Name dp_err("avail desc less than pool size");
489*5113495bSYour Name return -EAGAIN;
490*5113495bSYour Name }
491*5113495bSYour Name
492*5113495bSYour Name /* We have all the descriptors for the pool, we can delete the pool */
493*5113495bSYour Name dp_tx_desc_pool_deinit(soc, pool->flow_pool_id, false);
494*5113495bSYour Name dp_tx_desc_pool_free(soc, pool->flow_pool_id, false);
495*5113495bSYour Name qdf_spin_unlock_bh(&pool->flow_pool_lock);
496*5113495bSYour Name return 0;
497*5113495bSYour Name }
498*5113495bSYour Name
499*5113495bSYour Name /**
500*5113495bSYour Name * dp_tx_flow_pool_vdev_map() - Map flow_pool with vdev
501*5113495bSYour Name * @pdev: Handle to struct dp_pdev
502*5113495bSYour Name * @pool: flow_pool
503*5113495bSYour Name * @vdev_id: flow_id /vdev_id
504*5113495bSYour Name *
505*5113495bSYour Name * Return: none
506*5113495bSYour Name */
dp_tx_flow_pool_vdev_map(struct dp_pdev * pdev,struct dp_tx_desc_pool_s * pool,uint8_t vdev_id)507*5113495bSYour Name static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
508*5113495bSYour Name struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
509*5113495bSYour Name {
510*5113495bSYour Name struct dp_vdev *vdev;
511*5113495bSYour Name struct dp_soc *soc = pdev->soc;
512*5113495bSYour Name
513*5113495bSYour Name vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
514*5113495bSYour Name if (!vdev) {
515*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
516*5113495bSYour Name "%s: invalid vdev_id %d",
517*5113495bSYour Name __func__, vdev_id);
518*5113495bSYour Name return;
519*5113495bSYour Name }
520*5113495bSYour Name
521*5113495bSYour Name vdev->pool = pool;
522*5113495bSYour Name qdf_spin_lock_bh(&pool->flow_pool_lock);
523*5113495bSYour Name pool->pool_owner_ctx = soc;
524*5113495bSYour Name pool->flow_pool_id = vdev_id;
525*5113495bSYour Name qdf_spin_unlock_bh(&pool->flow_pool_lock);
526*5113495bSYour Name dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
527*5113495bSYour Name }
528*5113495bSYour Name
529*5113495bSYour Name /**
530*5113495bSYour Name * dp_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
531*5113495bSYour Name * @pdev: Handle to struct dp_pdev
532*5113495bSYour Name * @pool: flow_pool
533*5113495bSYour Name * @vdev_id: flow_id /vdev_id
534*5113495bSYour Name *
535*5113495bSYour Name * Return: none
536*5113495bSYour Name */
dp_tx_flow_pool_vdev_unmap(struct dp_pdev * pdev,struct dp_tx_desc_pool_s * pool,uint8_t vdev_id)537*5113495bSYour Name static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
538*5113495bSYour Name struct dp_tx_desc_pool_s *pool, uint8_t vdev_id)
539*5113495bSYour Name {
540*5113495bSYour Name struct dp_vdev *vdev;
541*5113495bSYour Name struct dp_soc *soc = pdev->soc;
542*5113495bSYour Name
543*5113495bSYour Name vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
544*5113495bSYour Name if (!vdev) {
545*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
546*5113495bSYour Name "%s: invalid vdev_id %d",
547*5113495bSYour Name __func__, vdev_id);
548*5113495bSYour Name return;
549*5113495bSYour Name }
550*5113495bSYour Name
551*5113495bSYour Name vdev->pool = NULL;
552*5113495bSYour Name dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
553*5113495bSYour Name }
554*5113495bSYour Name
555*5113495bSYour Name /**
556*5113495bSYour Name * dp_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
557*5113495bSYour Name * @pdev: Handle to struct dp_pdev
558*5113495bSYour Name * @flow_id: flow id
559*5113495bSYour Name * @flow_type: flow type
560*5113495bSYour Name * @flow_pool_id: pool id
561*5113495bSYour Name * @flow_pool_size: pool size
562*5113495bSYour Name *
563*5113495bSYour Name * Process below target to host message
564*5113495bSYour Name * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
565*5113495bSYour Name *
566*5113495bSYour Name * Return: none
567*5113495bSYour Name */
dp_tx_flow_pool_map_handler(struct dp_pdev * pdev,uint8_t flow_id,uint8_t flow_type,uint8_t flow_pool_id,uint32_t flow_pool_size)568*5113495bSYour Name QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
569*5113495bSYour Name uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size)
570*5113495bSYour Name {
571*5113495bSYour Name struct dp_soc *soc = pdev->soc;
572*5113495bSYour Name struct dp_tx_desc_pool_s *pool;
573*5113495bSYour Name enum htt_flow_type type = flow_type;
574*5113495bSYour Name
575*5113495bSYour Name
576*5113495bSYour Name dp_info("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
577*5113495bSYour Name flow_id, flow_type, flow_pool_id, flow_pool_size);
578*5113495bSYour Name
579*5113495bSYour Name if (qdf_unlikely(!soc)) {
580*5113495bSYour Name dp_err("soc is NULL");
581*5113495bSYour Name return QDF_STATUS_E_FAULT;
582*5113495bSYour Name }
583*5113495bSYour Name soc->pool_stats.pool_map_count++;
584*5113495bSYour Name
585*5113495bSYour Name pool = dp_tx_create_flow_pool(soc, flow_pool_id,
586*5113495bSYour Name flow_pool_size);
587*5113495bSYour Name if (!pool) {
588*5113495bSYour Name dp_err("creation of flow_pool %d size %d failed",
589*5113495bSYour Name flow_pool_id, flow_pool_size);
590*5113495bSYour Name return QDF_STATUS_E_RESOURCES;
591*5113495bSYour Name }
592*5113495bSYour Name
593*5113495bSYour Name switch (type) {
594*5113495bSYour Name
595*5113495bSYour Name case FLOW_TYPE_VDEV:
596*5113495bSYour Name dp_tx_flow_pool_vdev_map(pdev, pool, flow_id);
597*5113495bSYour Name break;
598*5113495bSYour Name default:
599*5113495bSYour Name dp_err("flow type %d not supported", type);
600*5113495bSYour Name break;
601*5113495bSYour Name }
602*5113495bSYour Name
603*5113495bSYour Name return QDF_STATUS_SUCCESS;
604*5113495bSYour Name }
605*5113495bSYour Name
606*5113495bSYour Name /**
607*5113495bSYour Name * dp_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
608*5113495bSYour Name * @pdev: Handle to struct dp_pdev
609*5113495bSYour Name * @flow_id: flow id
610*5113495bSYour Name * @flow_type: flow type
611*5113495bSYour Name * @flow_pool_id: pool id
612*5113495bSYour Name *
613*5113495bSYour Name * Process below target to host message
614*5113495bSYour Name * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
615*5113495bSYour Name *
616*5113495bSYour Name * Return: none
617*5113495bSYour Name */
dp_tx_flow_pool_unmap_handler(struct dp_pdev * pdev,uint8_t flow_id,uint8_t flow_type,uint8_t flow_pool_id)618*5113495bSYour Name void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
619*5113495bSYour Name uint8_t flow_type, uint8_t flow_pool_id)
620*5113495bSYour Name {
621*5113495bSYour Name struct dp_soc *soc = pdev->soc;
622*5113495bSYour Name struct dp_tx_desc_pool_s *pool;
623*5113495bSYour Name enum htt_flow_type type = flow_type;
624*5113495bSYour Name
625*5113495bSYour Name dp_info("flow_id %d flow_type %d flow_pool_id %d", flow_id, flow_type,
626*5113495bSYour Name flow_pool_id);
627*5113495bSYour Name
628*5113495bSYour Name if (qdf_unlikely(!pdev)) {
629*5113495bSYour Name dp_err("pdev is NULL");
630*5113495bSYour Name return;
631*5113495bSYour Name }
632*5113495bSYour Name soc->pool_stats.pool_unmap_count++;
633*5113495bSYour Name
634*5113495bSYour Name pool = &soc->tx_desc[flow_pool_id];
635*5113495bSYour Name dp_info("pool status: %d", pool->status);
636*5113495bSYour Name
637*5113495bSYour Name if (pool->status == FLOW_POOL_INACTIVE) {
638*5113495bSYour Name dp_err("flow pool id: %d is inactive, ignore unmap",
639*5113495bSYour Name flow_pool_id);
640*5113495bSYour Name return;
641*5113495bSYour Name }
642*5113495bSYour Name
643*5113495bSYour Name switch (type) {
644*5113495bSYour Name
645*5113495bSYour Name case FLOW_TYPE_VDEV:
646*5113495bSYour Name dp_tx_flow_pool_vdev_unmap(pdev, pool, flow_id);
647*5113495bSYour Name break;
648*5113495bSYour Name default:
649*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
650*5113495bSYour Name "%s: flow type %d not supported !!!",
651*5113495bSYour Name __func__, type);
652*5113495bSYour Name return;
653*5113495bSYour Name }
654*5113495bSYour Name
655*5113495bSYour Name /* only delete if all descriptors are available */
656*5113495bSYour Name dp_tx_delete_flow_pool(soc, pool, false);
657*5113495bSYour Name }
658*5113495bSYour Name
/**
 * dp_tx_flow_control_init() - Initialize tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Creates soc->flow_pool_array_lock; presumably this lock guards
 * soc->flow_pool_array accesses elsewhere — counterpart destruction
 * happens in dp_tx_flow_control_deinit().
 *
 * Return: none
 */
void dp_tx_flow_control_init(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->flow_pool_array_lock);
}
669*5113495bSYour Name
670*5113495bSYour Name /**
671*5113495bSYour Name * dp_tx_desc_pool_dealloc() - De-allocate tx desc pool
672*5113495bSYour Name * @soc: Handle to struct dp_soc
673*5113495bSYour Name *
674*5113495bSYour Name * Return: none
675*5113495bSYour Name */
dp_tx_desc_pool_dealloc(struct dp_soc * soc)676*5113495bSYour Name static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
677*5113495bSYour Name {
678*5113495bSYour Name struct dp_tx_desc_pool_s *tx_desc_pool;
679*5113495bSYour Name int i;
680*5113495bSYour Name
681*5113495bSYour Name for (i = 0; i < MAX_TXDESC_POOLS; i++) {
682*5113495bSYour Name tx_desc_pool = &((soc)->tx_desc[i]);
683*5113495bSYour Name if (!tx_desc_pool->desc_pages.num_pages)
684*5113495bSYour Name continue;
685*5113495bSYour Name
686*5113495bSYour Name dp_tx_desc_pool_deinit(soc, i, false);
687*5113495bSYour Name dp_tx_desc_pool_free(soc, i, false);
688*5113495bSYour Name }
689*5113495bSYour Name }
690*5113495bSYour Name
/**
 * dp_tx_flow_control_deinit() - Deregister fw based tx flow control
 * @soc: Handle to struct dp_soc
 *
 * Releases all TX descriptor pools first, then destroys the
 * flow_pool_array_lock — the reverse of dp_tx_flow_control_init().
 *
 * Return: none
 */
void dp_tx_flow_control_deinit(struct dp_soc *soc)
{
	dp_tx_desc_pool_dealloc(soc);

	qdf_spinlock_destroy(&soc->flow_pool_array_lock);
}
703*5113495bSYour Name
704*5113495bSYour Name /**
705*5113495bSYour Name * dp_txrx_register_pause_cb() - Register pause callback
706*5113495bSYour Name * @handle: Handle to struct dp_soc
707*5113495bSYour Name * @pause_cb: Tx pause_cb
708*5113495bSYour Name *
709*5113495bSYour Name * Return: none
710*5113495bSYour Name */
dp_txrx_register_pause_cb(struct cdp_soc_t * handle,tx_pause_callback pause_cb)711*5113495bSYour Name QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *handle,
712*5113495bSYour Name tx_pause_callback pause_cb)
713*5113495bSYour Name {
714*5113495bSYour Name struct dp_soc *soc = (struct dp_soc *)handle;
715*5113495bSYour Name
716*5113495bSYour Name if (!soc || !pause_cb) {
717*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
718*5113495bSYour Name FL("soc or pause_cb is NULL"));
719*5113495bSYour Name return QDF_STATUS_E_INVAL;
720*5113495bSYour Name }
721*5113495bSYour Name soc->pause_cb = pause_cb;
722*5113495bSYour Name
723*5113495bSYour Name return QDF_STATUS_SUCCESS;
724*5113495bSYour Name }
725*5113495bSYour Name
dp_tx_flow_pool_map(struct cdp_soc_t * handle,uint8_t pdev_id,uint8_t vdev_id)726*5113495bSYour Name QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *handle, uint8_t pdev_id,
727*5113495bSYour Name uint8_t vdev_id)
728*5113495bSYour Name {
729*5113495bSYour Name struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
730*5113495bSYour Name struct dp_pdev *pdev =
731*5113495bSYour Name dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
732*5113495bSYour Name int tx_ring_size = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
733*5113495bSYour Name
734*5113495bSYour Name if (!pdev) {
735*5113495bSYour Name dp_err("pdev is NULL");
736*5113495bSYour Name return QDF_STATUS_E_INVAL;
737*5113495bSYour Name }
738*5113495bSYour Name
739*5113495bSYour Name return dp_tx_flow_pool_map_handler(pdev, vdev_id, FLOW_TYPE_VDEV,
740*5113495bSYour Name vdev_id, tx_ring_size);
741*5113495bSYour Name }
742*5113495bSYour Name
dp_tx_flow_pool_unmap(struct cdp_soc_t * handle,uint8_t pdev_id,uint8_t vdev_id)743*5113495bSYour Name void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
744*5113495bSYour Name uint8_t vdev_id)
745*5113495bSYour Name {
746*5113495bSYour Name struct dp_soc *soc = cdp_soc_t_to_dp_soc(handle);
747*5113495bSYour Name struct dp_pdev *pdev =
748*5113495bSYour Name dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
749*5113495bSYour Name
750*5113495bSYour Name if (!pdev) {
751*5113495bSYour Name dp_err("pdev is NULL");
752*5113495bSYour Name return;
753*5113495bSYour Name }
754*5113495bSYour Name
755*5113495bSYour Name return dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
756*5113495bSYour Name FLOW_TYPE_VDEV, vdev_id);
757*5113495bSYour Name }
758