xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/dp_tx_desc.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #ifndef DP_TX_DESC_H
21*5113495bSYour Name #define DP_TX_DESC_H
22*5113495bSYour Name 
23*5113495bSYour Name #include "dp_types.h"
24*5113495bSYour Name #include "dp_tx.h"
25*5113495bSYour Name #include "dp_internal.h"
26*5113495bSYour Name 
/*
 * Tx descriptor ID (cookie) layout — default (non-WLAN_SOFTUMAC) case:
 * 21 bits cookie
 * 1 bit special pool indicator
 * 3 bits unused
 * 2 bits pool id 0 ~ 3,
 * 10 bits page id 0 ~ 1023
 * 5 bits offset id 0 ~ 31 (Desc size = 128, Num descs per page = 4096/128 = 32)
 * (WLAN_SOFTUMAC_SUPPORT builds use a 4-bit offset / 11-bit page id instead;
 * see the mask definitions below.)
 */
35*5113495bSYour Name /* ???Ring ID needed??? */
36*5113495bSYour Name 
/* TODO: Need to revisit this change for Rhine */
#ifdef WLAN_SOFTUMAC_SUPPORT
/*
 * Softumac cookie layout: bit 20 = special-pool flag, bits 15-16 = pool id,
 * bits 4-14 = page id (0 ~ 2047), bits 0-3 = offset within page
 * (i.e. at most 16 descriptors per page).
 */
#define DP_TX_DESC_ID_SPCL_MASK    0x100000
#define DP_TX_DESC_ID_SPCL_OS      20
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FF0
#define DP_TX_DESC_ID_PAGE_OS      4
#define DP_TX_DESC_ID_OFFSET_MASK  0x00000F
#define DP_TX_DESC_ID_OFFSET_OS    0
#else
/*
 * Default cookie layout: bit 20 = special-pool flag, bits 15-16 = pool id,
 * bits 5-14 = page id (0 ~ 1023), bits 0-4 = offset within page
 * (i.e. at most 32 descriptors per page; see layout comment above).
 */
#define DP_TX_DESC_ID_SPCL_MASK    0x100000
#define DP_TX_DESC_ID_SPCL_OS      20
#define DP_TX_DESC_ID_POOL_MASK    0x018000
#define DP_TX_DESC_ID_POOL_OS      15
#define DP_TX_DESC_ID_PAGE_MASK    0x007FE0
#define DP_TX_DESC_ID_PAGE_OS      5
#define DP_TX_DESC_ID_OFFSET_MASK  0x00001F
#define DP_TX_DESC_ID_OFFSET_OS    0
#endif /* WLAN_SOFTUMAC_SUPPORT */
57*5113495bSYour Name 
/*
 * Compile-time assert on tx desc size.
 *
 * If this assert fires, update DP_TX_DESC_ID_PAGE_MASK /
 * DP_TX_DESC_ID_PAGE_OS (and the OFFSET mask) for the new size.
 *
 * The check pins sizeof(struct dp_tx_desc_s) into the half-open range
 * (DP_BLOCKMEM_SIZE >> (PAGE_OS + 1), DP_BLOCKMEM_SIZE >> PAGE_OS],
 * i.e. the maximum descriptor count per page implied by the OFFSET
 * bit-width actually fits in one memory block.  For the current
 * non-softumac PAGE_OS the allowed size range of tx_desc is
 * (DP_BLOCKMEM_SIZE/64, DP_BLOCKMEM_SIZE/32].
 */
QDF_COMPILE_TIME_ASSERT(dp_tx_desc_size,
			((sizeof(struct dp_tx_desc_s)) <=
			 (DP_BLOCKMEM_SIZE >> DP_TX_DESC_ID_PAGE_OS)) &&
			((sizeof(struct dp_tx_desc_s)) >
			 (DP_BLOCKMEM_SIZE >> (DP_TX_DESC_ID_PAGE_OS + 1)))
		       );
73*5113495bSYour Name 
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
 * With flow-control V2 the TX_DESC_LOCK_* macros compile out entirely;
 * descriptor-pool serialization is presumably handled by the flow-pool
 * code paths instead — confirm against the flow-control implementation.
 */
#define TX_DESC_LOCK_CREATE(lock)
#define TX_DESC_LOCK_DESTROY(lock)
#define TX_DESC_LOCK_LOCK(lock)
#define TX_DESC_LOCK_UNLOCK(lock)
/* A pool is unusable once the flow-control code marks it inactive */
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) \
	((pool)->status == FLOW_POOL_INACTIVE)
#ifdef QCA_AC_BASED_FLOW_CONTROL
/* AC-based flow control also clears per-AC thresholds; use the helper */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
	dp_tx_flow_pool_member_clean(_tx_desc_pool)

#else /* !QCA_AC_BASED_FLOW_CONTROL */
/* Reset a flow pool back to a pristine, inactive state */
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->pool_size = 0;                \
	(_tx_desc_pool)->avail_desc = 0;               \
	(_tx_desc_pool)->start_th = 0;                 \
	(_tx_desc_pool)->stop_th = 0;                  \
	(_tx_desc_pool)->status = FLOW_POOL_INACTIVE;  \
} while (0)
#endif /* QCA_AC_BASED_FLOW_CONTROL */
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/* No flow control: a plain BH spinlock protects the descriptor freelist */
#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock_bh(lock)
#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock_bh(lock)
/* Without flow control a pool can never be flow-inactive */
#define IS_TX_DESC_POOL_STATUS_INACTIVE(pool) (false)
#define TX_DESC_POOL_MEMBER_CLEAN(_tx_desc_pool)       \
do {                                                   \
	(_tx_desc_pool)->elem_size = 0;                \
	(_tx_desc_pool)->num_allocated = 0;            \
	(_tx_desc_pool)->freelist = NULL;              \
	(_tx_desc_pool)->elem_count = 0;               \
	(_tx_desc_pool)->num_free = 0;                 \
} while (0)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/* Presumably an upper bound on per-pool descriptor count — confirm users */
#define MAX_POOL_BUFF_COUNT 10000
113*5113495bSYour Name 
#ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_magic() - stamp @magic_pattern into the descriptor
 * @tx_desc: tx descriptor to stamp
 * @magic_pattern: value written into tx_desc->magic
 *
 * Compiled in only when DP_TX_TRACKING is enabled; presumably the
 * tracking code later inspects this field to detect stale or corrupted
 * descriptors — confirm against the DP_TX_TRACKING consumers.
 */
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
	tx_desc->magic = magic_pattern;
}
#else
/* DP_TX_TRACKING disabled: no magic field is maintained, so this is a no-op */
static inline void dp_tx_desc_set_magic(struct dp_tx_desc_s *tx_desc,
					uint32_t magic_pattern)
{
}
#endif
126*5113495bSYour Name 
127*5113495bSYour Name /**
128*5113495bSYour Name  * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
129*5113495bSYour Name  * @soc: Handle to DP SoC structure
130*5113495bSYour Name  * @pool_id: pool to allocate
131*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
132*5113495bSYour Name  * @spcl_tx_desc: if special desc
133*5113495bSYour Name  *
134*5113495bSYour Name  * This function allocates memory for SW tx descriptors
135*5113495bSYour Name  * (used within host for tx data path).
136*5113495bSYour Name  * The number of tx descriptors required will be large
137*5113495bSYour Name  * since based on number of clients (1024 clients x 3 radios),
138*5113495bSYour Name  * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly
139*5113495bSYour Name  * large.
140*5113495bSYour Name  *
141*5113495bSYour Name  * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf
142*5113495bSYour Name  * function to allocate memory
143*5113495bSYour Name  * in multiple pages. It then iterates through the memory allocated across pages
144*5113495bSYour Name  * and links each descriptor
145*5113495bSYour Name  * to next descriptor, taking care of page boundaries.
146*5113495bSYour Name  *
147*5113495bSYour Name  * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
148*5113495bSYour Name  * one for each ring;
149*5113495bSYour Name  * This minimizes lock contention when hard_start_xmit is called
150*5113495bSYour Name  * from multiple CPUs.
151*5113495bSYour Name  * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
152*5113495bSYour Name  * flow control.
153*5113495bSYour Name  *
154*5113495bSYour Name  * Return: Status code. 0 for success.
155*5113495bSYour Name  */
156*5113495bSYour Name QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
157*5113495bSYour Name 				 uint32_t num_elem, bool spcl_tx_desc);
158*5113495bSYour Name 
159*5113495bSYour Name /**
160*5113495bSYour Name  * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
161*5113495bSYour Name  * @soc: Handle to DP SoC structure
162*5113495bSYour Name  * @pool_id: pool to allocate
163*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
164*5113495bSYour Name  * @spcl_tx_desc: if special desc
165*5113495bSYour Name  *
166*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
167*5113495bSYour Name  *	   QDF_STATUS_E_FAULT
168*5113495bSYour Name  */
169*5113495bSYour Name QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
170*5113495bSYour Name 				uint32_t num_elem, bool spcl_tx_desc);
171*5113495bSYour Name 
172*5113495bSYour Name /**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
174*5113495bSYour Name  * @soc: Handle to DP SoC structure
175*5113495bSYour Name  * @pool_id: pool to free
176*5113495bSYour Name  * @spcl_tx_desc: if special desc
177*5113495bSYour Name  *
178*5113495bSYour Name  */
179*5113495bSYour Name void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
180*5113495bSYour Name 			  bool spcl_tx_desc);
181*5113495bSYour Name 
182*5113495bSYour Name /**
183*5113495bSYour Name  * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
184*5113495bSYour Name  * @soc: Handle to DP SoC structure
185*5113495bSYour Name  * @pool_id: pool to de-initialize
186*5113495bSYour Name  * @spcl_tx_desc: if special desc
187*5113495bSYour Name  *
188*5113495bSYour Name  */
189*5113495bSYour Name void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
190*5113495bSYour Name 			    bool spcl_tx_desc);
191*5113495bSYour Name 
192*5113495bSYour Name /**
193*5113495bSYour Name  * dp_tx_ext_desc_pool_alloc_by_id() - allocate TX extension Descriptor pool
194*5113495bSYour Name  *                                     based on pool ID
195*5113495bSYour Name  * @soc: Handle to DP SoC structure
196*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
197*5113495bSYour Name  * @pool_id: Pool ID
198*5113495bSYour Name  *
 * Return: QDF_STATUS_SUCCESS
200*5113495bSYour Name  *	    QDF_STATUS_E_NOMEM
201*5113495bSYour Name  */
202*5113495bSYour Name QDF_STATUS dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc,
203*5113495bSYour Name 					   uint32_t num_elem,
204*5113495bSYour Name 					   uint8_t pool_id);
205*5113495bSYour Name /**
206*5113495bSYour Name  * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
207*5113495bSYour Name  * @soc: Handle to DP SoC structure
208*5113495bSYour Name  * @num_pool: Number of pools to allocate
209*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
210*5113495bSYour Name  *
211*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
212*5113495bSYour Name  *	   QDF_STATUS_E_NOMEM
213*5113495bSYour Name  */
214*5113495bSYour Name QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
215*5113495bSYour Name 				     uint32_t num_elem);
216*5113495bSYour Name 
217*5113495bSYour Name /**
218*5113495bSYour Name  * dp_tx_ext_desc_pool_init_by_id() - initialize Tx extension Descriptor pool
219*5113495bSYour Name  *                                    based on pool ID
220*5113495bSYour Name  * @soc: Handle to DP SoC structure
221*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
222*5113495bSYour Name  * @pool_id: Pool ID
223*5113495bSYour Name  *
 * Return: QDF_STATUS_SUCCESS
225*5113495bSYour Name  *	    QDF_STATUS_E_FAULT
226*5113495bSYour Name  */
227*5113495bSYour Name QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
228*5113495bSYour Name 					  uint8_t pool_id);
229*5113495bSYour Name 
230*5113495bSYour Name /**
231*5113495bSYour Name  * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
232*5113495bSYour Name  * @soc: Handle to DP SoC structure
233*5113495bSYour Name  * @num_pool: Number of pools to initialize
234*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
235*5113495bSYour Name  *
236*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
237*5113495bSYour Name  *	   QDF_STATUS_E_NOMEM
238*5113495bSYour Name  */
239*5113495bSYour Name QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
240*5113495bSYour Name 				    uint32_t num_elem);
241*5113495bSYour Name 
242*5113495bSYour Name /**
243*5113495bSYour Name  * dp_tx_ext_desc_pool_free_by_id() - free TX extension Descriptor pool
244*5113495bSYour Name  *                                    based on pool ID
245*5113495bSYour Name  * @soc: Handle to DP SoC structure
246*5113495bSYour Name  * @pool_id: Pool ID
247*5113495bSYour Name  *
248*5113495bSYour Name  */
249*5113495bSYour Name void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);
250*5113495bSYour Name 
251*5113495bSYour Name /**
252*5113495bSYour Name  * dp_tx_ext_desc_pool_free() -  free Tx extension Descriptor pool(s)
253*5113495bSYour Name  * @soc: Handle to DP SoC structure
254*5113495bSYour Name  * @num_pool: Number of pools to free
255*5113495bSYour Name  *
256*5113495bSYour Name  */
257*5113495bSYour Name void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
258*5113495bSYour Name 
259*5113495bSYour Name /**
260*5113495bSYour Name  * dp_tx_ext_desc_pool_deinit_by_id() - deinit Tx extension Descriptor pool
261*5113495bSYour Name  *                                      based on pool ID
262*5113495bSYour Name  * @soc: Handle to DP SoC structure
263*5113495bSYour Name  * @pool_id: Pool ID
264*5113495bSYour Name  *
265*5113495bSYour Name  */
266*5113495bSYour Name void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);
267*5113495bSYour Name 
268*5113495bSYour Name /**
269*5113495bSYour Name  * dp_tx_ext_desc_pool_deinit() -  deinit Tx extension Descriptor pool(s)
270*5113495bSYour Name  * @soc: Handle to DP SoC structure
271*5113495bSYour Name  * @num_pool: Number of pools to de-initialize
272*5113495bSYour Name  *
273*5113495bSYour Name  */
274*5113495bSYour Name void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
275*5113495bSYour Name 
276*5113495bSYour Name /**
277*5113495bSYour Name  * dp_tx_tso_desc_pool_alloc_by_id() - allocate TSO Descriptor pool based
278*5113495bSYour Name  *                                     on pool ID
279*5113495bSYour Name  * @soc: Handle to DP SoC structure
280*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
281*5113495bSYour Name  * @pool_id: Pool ID
282*5113495bSYour Name  *
 * Return: QDF_STATUS_SUCCESS
284*5113495bSYour Name  *	    QDF_STATUS_E_NOMEM
285*5113495bSYour Name  */
286*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
287*5113495bSYour Name 					   uint8_t pool_id);
288*5113495bSYour Name 
289*5113495bSYour Name /**
290*5113495bSYour Name  * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
291*5113495bSYour Name  * @soc: Handle to DP SoC structure
292*5113495bSYour Name  * @num_pool: Number of pools to allocate
293*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
294*5113495bSYour Name  *
295*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
296*5113495bSYour Name  *	   QDF_STATUS_E_NOMEM
297*5113495bSYour Name  */
298*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
299*5113495bSYour Name 				     uint32_t num_elem);
300*5113495bSYour Name 
301*5113495bSYour Name /**
302*5113495bSYour Name  * dp_tx_tso_desc_pool_init_by_id() - initialize TSO Descriptor pool
303*5113495bSYour Name  *                                    based on pool ID
304*5113495bSYour Name  * @soc: Handle to DP SoC structure
305*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
306*5113495bSYour Name  * @pool_id: Pool ID
307*5113495bSYour Name  *
 * Return: QDF_STATUS_SUCCESS
309*5113495bSYour Name  *	    QDF_STATUS_E_NOMEM
310*5113495bSYour Name  */
311*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
312*5113495bSYour Name 					  uint8_t pool_id);
313*5113495bSYour Name 
314*5113495bSYour Name /**
315*5113495bSYour Name  * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
316*5113495bSYour Name  * @soc: Handle to DP SoC structure
317*5113495bSYour Name  * @num_pool: Number of pools to initialize
318*5113495bSYour Name  * @num_elem: Number of descriptor elements per pool
319*5113495bSYour Name  *
320*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
321*5113495bSYour Name  *	   QDF_STATUS_E_NOMEM
322*5113495bSYour Name  */
323*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
324*5113495bSYour Name 				    uint32_t num_elem);
325*5113495bSYour Name 
326*5113495bSYour Name /**
327*5113495bSYour Name  * dp_tx_tso_desc_pool_free_by_id() - free TSO Descriptor pool based on pool ID
328*5113495bSYour Name  * @soc: Handle to DP SoC structure
329*5113495bSYour Name  * @pool_id: Pool ID
330*5113495bSYour Name  */
331*5113495bSYour Name void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);
332*5113495bSYour Name 
333*5113495bSYour Name /**
334*5113495bSYour Name  * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
335*5113495bSYour Name  * @soc: Handle to DP SoC structure
336*5113495bSYour Name  * @num_pool: Number of pools to free
337*5113495bSYour Name  *
338*5113495bSYour Name  */
339*5113495bSYour Name void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
340*5113495bSYour Name 
341*5113495bSYour Name /**
342*5113495bSYour Name  * dp_tx_tso_desc_pool_deinit_by_id() - deinitialize TSO Descriptor pool
343*5113495bSYour Name  *                                      based on pool ID
344*5113495bSYour Name  * @soc: Handle to DP SoC structure
345*5113495bSYour Name  * @pool_id: Pool ID
346*5113495bSYour Name  */
347*5113495bSYour Name void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);
348*5113495bSYour Name 
349*5113495bSYour Name /**
350*5113495bSYour Name  * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
351*5113495bSYour Name  * @soc: Handle to DP SoC structure
352*5113495bSYour Name  * @num_pool: Number of pools to free
353*5113495bSYour Name  *
354*5113495bSYour Name  */
355*5113495bSYour Name void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
356*5113495bSYour Name 
357*5113495bSYour Name /**
358*5113495bSYour Name  * dp_tx_tso_num_seg_pool_alloc_by_id() - Allocate descriptors that tracks the
359*5113495bSYour Name  *                             fragments in each tso segment based on pool ID
360*5113495bSYour Name  * @soc: handle to dp soc structure
361*5113495bSYour Name  * @num_elem: total number of descriptors to be allocated
362*5113495bSYour Name  * @pool_id: Pool ID
363*5113495bSYour Name  *
 * Return: QDF_STATUS_SUCCESS
365*5113495bSYour Name  *	    QDF_STATUS_E_NOMEM
366*5113495bSYour Name  */
367*5113495bSYour Name QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
368*5113495bSYour Name 					      uint32_t num_elem,
369*5113495bSYour Name 					      uint8_t pool_id);
370*5113495bSYour Name 
371*5113495bSYour Name /**
372*5113495bSYour Name  * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the
373*5113495bSYour Name  *                              fragments in each tso segment
374*5113495bSYour Name  *
375*5113495bSYour Name  * @soc: handle to dp soc structure
376*5113495bSYour Name  * @num_pool: number of pools to allocate
377*5113495bSYour Name  * @num_elem: total number of descriptors to be allocated
378*5113495bSYour Name  *
379*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
380*5113495bSYour Name  *	   QDF_STATUS_E_NOMEM
381*5113495bSYour Name  */
382*5113495bSYour Name QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
383*5113495bSYour Name 					uint32_t num_elem);
384*5113495bSYour Name 
385*5113495bSYour Name /**
386*5113495bSYour Name  * dp_tx_tso_num_seg_pool_init_by_id() - Initialize descriptors that tracks the
387*5113495bSYour Name  *                              fragments in each tso segment based on pool ID
388*5113495bSYour Name  *
389*5113495bSYour Name  * @soc: handle to dp soc structure
390*5113495bSYour Name  * @num_elem: total number of descriptors to be initialized
391*5113495bSYour Name  * @pool_id: Pool ID
392*5113495bSYour Name  *
 * Return: QDF_STATUS_SUCCESS
394*5113495bSYour Name  *	    QDF_STATUS_E_FAULT
395*5113495bSYour Name  */
396*5113495bSYour Name QDF_STATUS dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc,
397*5113495bSYour Name 					     uint32_t num_elem,
398*5113495bSYour Name 					     uint8_t pool_id);
399*5113495bSYour Name 
400*5113495bSYour Name /**
401*5113495bSYour Name  * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that tracks the
402*5113495bSYour Name  *                              fragments in each tso segment
403*5113495bSYour Name  *
404*5113495bSYour Name  * @soc: handle to dp soc structure
405*5113495bSYour Name  * @num_pool: number of pools to initialize
406*5113495bSYour Name  * @num_elem: total number of descriptors to be initialized
407*5113495bSYour Name  *
408*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
409*5113495bSYour Name  *	   QDF_STATUS_E_FAULT
410*5113495bSYour Name  */
411*5113495bSYour Name QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
412*5113495bSYour Name 				       uint32_t num_elem);
413*5113495bSYour Name 
414*5113495bSYour Name /**
415*5113495bSYour Name  * dp_tx_tso_num_seg_pool_free_by_id() - free descriptors that tracks the
416*5113495bSYour Name  *                              fragments in each tso segment based on pool ID
417*5113495bSYour Name  *
418*5113495bSYour Name  * @soc: handle to dp soc structure
419*5113495bSYour Name  * @pool_id: Pool ID
420*5113495bSYour Name  */
421*5113495bSYour Name void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id);
422*5113495bSYour Name 
423*5113495bSYour Name /**
424*5113495bSYour Name  * dp_tx_tso_num_seg_pool_free() - free descriptors that tracks the
425*5113495bSYour Name  *                              fragments in each tso segment
426*5113495bSYour Name  *
427*5113495bSYour Name  * @soc: handle to dp soc structure
428*5113495bSYour Name  * @num_pool: number of pools to free
429*5113495bSYour Name  */
430*5113495bSYour Name void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);
431*5113495bSYour Name 
432*5113495bSYour Name /**
433*5113495bSYour Name  * dp_tx_tso_num_seg_pool_deinit_by_id() - de-initialize descriptors that tracks
434*5113495bSYour Name  *                           the fragments in each tso segment based on pool ID
435*5113495bSYour Name  * @soc: handle to dp soc structure
436*5113495bSYour Name  * @pool_id: Pool ID
437*5113495bSYour Name  */
438*5113495bSYour Name void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id);
439*5113495bSYour Name 
440*5113495bSYour Name /**
441*5113495bSYour Name  * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that tracks the
442*5113495bSYour Name  *                              fragments in each tso segment
443*5113495bSYour Name  *
444*5113495bSYour Name  * @soc: handle to dp soc structure
445*5113495bSYour Name  * @num_pool: number of pools to de-initialize
446*5113495bSYour Name  *
 * Return: none
449*5113495bSYour Name  */
450*5113495bSYour Name void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
451*5113495bSYour Name 
452*5113495bSYour Name #ifdef DP_UMAC_HW_RESET_SUPPORT
453*5113495bSYour Name /**
 * dp_tx_desc_pool_cleanup() -  Clean up the tx descriptor pools
455*5113495bSYour Name  * @soc: Handle to DP SoC structure
456*5113495bSYour Name  * @nbuf_list: nbuf list for delayed free
457*5113495bSYour Name  * @cleanup: cleanup the pool
458*5113495bSYour Name  *
459*5113495bSYour Name  */
460*5113495bSYour Name void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list,
461*5113495bSYour Name 			     bool cleanup);
462*5113495bSYour Name #endif
463*5113495bSYour Name 
464*5113495bSYour Name /**
465*5113495bSYour Name  * dp_tx_desc_clear() - Clear contents of tx desc
466*5113495bSYour Name  * @tx_desc: descriptor to free
467*5113495bSYour Name  *
468*5113495bSYour Name  * Return: none
469*5113495bSYour Name  */
470*5113495bSYour Name static inline void
dp_tx_desc_clear(struct dp_tx_desc_s * tx_desc)471*5113495bSYour Name dp_tx_desc_clear(struct dp_tx_desc_s *tx_desc)
472*5113495bSYour Name {
473*5113495bSYour Name 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
474*5113495bSYour Name 	tx_desc->nbuf = NULL;
475*5113495bSYour Name 	tx_desc->flags = 0;
476*5113495bSYour Name 	tx_desc->next = NULL;
477*5113495bSYour Name }
478*5113495bSYour Name 
479*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Initialize SoC-level tx flow-control state */
void dp_tx_flow_control_init(struct dp_soc *);
/* Tear down SoC-level tx flow-control state */
void dp_tx_flow_control_deinit(struct dp_soc *);

/* Register the callback invoked to pause/unpause tx queues */
QDF_STATUS dp_txrx_register_pause_cb(struct cdp_soc_t *soc,
	tx_pause_callback pause_cb);
/* Map a vdev to a tx flow pool (CDP entry point) */
QDF_STATUS dp_tx_flow_pool_map(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t vdev_id);
/* Unmap a vdev from its tx flow pool (CDP entry point) */
void dp_tx_flow_pool_unmap(struct cdp_soc_t *handle, uint8_t pdev_id,
			   uint8_t vdev_id);
/* Reset accumulated flow-pool statistics on @soc */
void dp_tx_clear_flow_pool_stats(struct dp_soc *soc);
/* Create a flow pool of @flow_pool_size descriptors; NULL-check the result */
struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
	uint8_t flow_pool_id, uint32_t flow_pool_size);

/* Handle a flow->pool map event for @flow_id on @pdev */
QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint32_t flow_pool_size);
/* Handle a flow->pool unmap event for @flow_id on @pdev */
void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id);
497*5113495bSYour Name 
498*5113495bSYour Name /**
499*5113495bSYour Name  * dp_tx_get_desc_flow_pool() - get descriptor from flow pool
500*5113495bSYour Name  * @pool: flow pool
501*5113495bSYour Name  *
502*5113495bSYour Name  * Caller needs to take lock and do sanity checks.
503*5113495bSYour Name  *
504*5113495bSYour Name  * Return: tx descriptor
505*5113495bSYour Name  */
506*5113495bSYour Name static inline
dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s * pool)507*5113495bSYour Name struct dp_tx_desc_s *dp_tx_get_desc_flow_pool(struct dp_tx_desc_pool_s *pool)
508*5113495bSYour Name {
509*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = pool->freelist;
510*5113495bSYour Name 
511*5113495bSYour Name 	pool->freelist = pool->freelist->next;
512*5113495bSYour Name 	pool->avail_desc--;
513*5113495bSYour Name 	return tx_desc;
514*5113495bSYour Name }
515*5113495bSYour Name 
516*5113495bSYour Name /**
517*5113495bSYour Name  * dp_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
518*5113495bSYour Name  * @pool: flow pool
519*5113495bSYour Name  * @tx_desc: tx descriptor
520*5113495bSYour Name  *
521*5113495bSYour Name  * Caller needs to take lock and do sanity checks.
522*5113495bSYour Name  *
523*5113495bSYour Name  * Return: none
524*5113495bSYour Name  */
525*5113495bSYour Name static inline
dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s * pool,struct dp_tx_desc_s * tx_desc)526*5113495bSYour Name void dp_tx_put_desc_flow_pool(struct dp_tx_desc_pool_s *pool,
527*5113495bSYour Name 			struct dp_tx_desc_s *tx_desc)
528*5113495bSYour Name {
529*5113495bSYour Name 	tx_desc->next = pool->freelist;
530*5113495bSYour Name 	pool->freelist = tx_desc;
531*5113495bSYour Name 	pool->avail_desc++;
532*5113495bSYour Name }
533*5113495bSYour Name 
/*
 * dp_tx_desc_free_list() - no-op stub under QCA_LL_TX_FLOW_CONTROL_V2
 *
 * Batched freelist release is intentionally empty in this configuration;
 * all parameters are ignored.
 */
static inline void
dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
		     struct dp_tx_desc_s *head_desc,
		     struct dp_tx_desc_s *tail_desc,
		     uint32_t fast_desc_count)
{
}
541*5113495bSYour Name 
542*5113495bSYour Name #ifdef QCA_AC_BASED_FLOW_CONTROL
543*5113495bSYour Name 
/**
 * dp_tx_flow_pool_member_clean() - Clean the members of TX flow pool
 * @pool: flow pool
 *
 * Resets size, freelist and availability counters, zeroes the per-AC
 * start/stop threshold arrays, and marks the pool FLOW_POOL_INACTIVE.
 *
 * Return: None
 */
static inline void
dp_tx_flow_pool_member_clean(struct dp_tx_desc_pool_s *pool)
{
	pool->elem_size = 0;
	pool->freelist = NULL;
	pool->pool_size = 0;
	pool->avail_desc = 0;
	/*
	 * NOTE(review): qdf_mem_zero takes a byte count, but FL_TH_MAX
	 * here reads like an element count; if start_th/stop_th elements
	 * are wider than one byte this zeroes only part of each array —
	 * confirm against the dp_tx_desc_pool_s definition.
	 */
	qdf_mem_zero(pool->start_th, FL_TH_MAX);
	qdf_mem_zero(pool->stop_th, FL_TH_MAX);
	pool->status = FLOW_POOL_INACTIVE;
}
561*5113495bSYour Name 
562*5113495bSYour Name /**
563*5113495bSYour Name  * dp_tx_is_threshold_reached() - Check if current avail desc meet threshold
564*5113495bSYour Name  * @pool: flow pool
565*5113495bSYour Name  * @avail_desc: available descriptor number
566*5113495bSYour Name  *
567*5113495bSYour Name  * Return: true if threshold is met, false if not
568*5113495bSYour Name  */
569*5113495bSYour Name static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s * pool,uint16_t avail_desc)570*5113495bSYour Name dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
571*5113495bSYour Name {
572*5113495bSYour Name 	if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_BE_BK]))
573*5113495bSYour Name 		return true;
574*5113495bSYour Name 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VI]))
575*5113495bSYour Name 		return true;
576*5113495bSYour Name 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_VO]))
577*5113495bSYour Name 		return true;
578*5113495bSYour Name 	else if (qdf_unlikely(avail_desc == pool->stop_th[DP_TH_HI]))
579*5113495bSYour Name 		return true;
580*5113495bSYour Name 	else
581*5113495bSYour Name 		return false;
582*5113495bSYour Name }
583*5113495bSYour Name 
584*5113495bSYour Name /**
585*5113495bSYour Name  * dp_tx_adjust_flow_pool_state() - Adjust flow pool state
586*5113495bSYour Name  * @soc: dp soc
587*5113495bSYour Name  * @pool: flow pool
588*5113495bSYour Name  */
589*5113495bSYour Name static inline void
dp_tx_adjust_flow_pool_state(struct dp_soc * soc,struct dp_tx_desc_pool_s * pool)590*5113495bSYour Name dp_tx_adjust_flow_pool_state(struct dp_soc *soc,
591*5113495bSYour Name 			     struct dp_tx_desc_pool_s *pool)
592*5113495bSYour Name {
593*5113495bSYour Name 	if (pool->avail_desc > pool->stop_th[DP_TH_BE_BK]) {
594*5113495bSYour Name 		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
595*5113495bSYour Name 		return;
596*5113495bSYour Name 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_BE_BK] &&
597*5113495bSYour Name 		   pool->avail_desc > pool->stop_th[DP_TH_VI]) {
598*5113495bSYour Name 		pool->status = FLOW_POOL_BE_BK_PAUSED;
599*5113495bSYour Name 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VI] &&
600*5113495bSYour Name 		   pool->avail_desc > pool->stop_th[DP_TH_VO]) {
601*5113495bSYour Name 		pool->status = FLOW_POOL_VI_PAUSED;
602*5113495bSYour Name 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_VO] &&
603*5113495bSYour Name 		   pool->avail_desc > pool->stop_th[DP_TH_HI]) {
604*5113495bSYour Name 		pool->status = FLOW_POOL_VO_PAUSED;
605*5113495bSYour Name 	} else if (pool->avail_desc <= pool->stop_th[DP_TH_HI]) {
606*5113495bSYour Name 		pool->status = FLOW_POOL_ACTIVE_PAUSED;
607*5113495bSYour Name 	}
608*5113495bSYour Name 
609*5113495bSYour Name 	switch (pool->status) {
610*5113495bSYour Name 	case FLOW_POOL_ACTIVE_PAUSED:
611*5113495bSYour Name 		soc->pause_cb(pool->flow_pool_id,
612*5113495bSYour Name 			      WLAN_NETIF_PRIORITY_QUEUE_OFF,
613*5113495bSYour Name 			      WLAN_DATA_FLOW_CTRL_PRI);
614*5113495bSYour Name 		fallthrough;
615*5113495bSYour Name 
616*5113495bSYour Name 	case FLOW_POOL_VO_PAUSED:
617*5113495bSYour Name 		soc->pause_cb(pool->flow_pool_id,
618*5113495bSYour Name 			      WLAN_NETIF_VO_QUEUE_OFF,
619*5113495bSYour Name 			      WLAN_DATA_FLOW_CTRL_VO);
620*5113495bSYour Name 		fallthrough;
621*5113495bSYour Name 
622*5113495bSYour Name 	case FLOW_POOL_VI_PAUSED:
623*5113495bSYour Name 		soc->pause_cb(pool->flow_pool_id,
624*5113495bSYour Name 			      WLAN_NETIF_VI_QUEUE_OFF,
625*5113495bSYour Name 			      WLAN_DATA_FLOW_CTRL_VI);
626*5113495bSYour Name 		fallthrough;
627*5113495bSYour Name 
628*5113495bSYour Name 	case FLOW_POOL_BE_BK_PAUSED:
629*5113495bSYour Name 		soc->pause_cb(pool->flow_pool_id,
630*5113495bSYour Name 			      WLAN_NETIF_BE_BK_QUEUE_OFF,
631*5113495bSYour Name 			      WLAN_DATA_FLOW_CTRL_BE_BK);
632*5113495bSYour Name 		break;
633*5113495bSYour Name 	default:
634*5113495bSYour Name 		dp_err("Invalid pool status:%u to adjust", pool->status);
635*5113495bSYour Name 	}
636*5113495bSYour Name }
637*5113495bSYour Name 
638*5113495bSYour Name /**
639*5113495bSYour Name  * dp_tx_desc_alloc() - Allocate a Software Tx descriptor from given pool
640*5113495bSYour Name  * @soc: Handle to DP SoC structure
641*5113495bSYour Name  * @desc_pool_id: ID of the flow control fool
642*5113495bSYour Name  *
643*5113495bSYour Name  * Return: TX descriptor allocated or NULL
644*5113495bSYour Name  */
645*5113495bSYour Name static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc * soc,uint8_t desc_pool_id)646*5113495bSYour Name dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
647*5113495bSYour Name {
648*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = NULL;
649*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
650*5113495bSYour Name 	bool is_pause = false;
651*5113495bSYour Name 	enum netif_action_type act = WLAN_NETIF_ACTION_TYPE_NONE;
652*5113495bSYour Name 	enum dp_fl_ctrl_threshold level = DP_TH_BE_BK;
653*5113495bSYour Name 	enum netif_reason_type reason;
654*5113495bSYour Name 
655*5113495bSYour Name 	if (qdf_likely(pool)) {
656*5113495bSYour Name 		qdf_spin_lock_bh(&pool->flow_pool_lock);
657*5113495bSYour Name 		if (qdf_likely(pool->avail_desc &&
658*5113495bSYour Name 		    pool->status != FLOW_POOL_INVALID &&
659*5113495bSYour Name 		    pool->status != FLOW_POOL_INACTIVE)) {
660*5113495bSYour Name 			tx_desc = dp_tx_get_desc_flow_pool(pool);
661*5113495bSYour Name 			tx_desc->pool_id = desc_pool_id;
662*5113495bSYour Name 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
663*5113495bSYour Name 			dp_tx_desc_set_magic(tx_desc,
664*5113495bSYour Name 					     DP_TX_MAGIC_PATTERN_INUSE);
665*5113495bSYour Name 			is_pause = dp_tx_is_threshold_reached(pool,
666*5113495bSYour Name 							      pool->avail_desc);
667*5113495bSYour Name 
668*5113495bSYour Name 			if (qdf_unlikely(pool->status ==
669*5113495bSYour Name 					 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH)) {
670*5113495bSYour Name 				dp_tx_adjust_flow_pool_state(soc, pool);
671*5113495bSYour Name 				is_pause = false;
672*5113495bSYour Name 			}
673*5113495bSYour Name 
674*5113495bSYour Name 			if (qdf_unlikely(is_pause)) {
675*5113495bSYour Name 				switch (pool->status) {
676*5113495bSYour Name 				case FLOW_POOL_ACTIVE_UNPAUSED:
677*5113495bSYour Name 					/* pause network BE\BK queue */
678*5113495bSYour Name 					act = WLAN_NETIF_BE_BK_QUEUE_OFF;
679*5113495bSYour Name 					reason = WLAN_DATA_FLOW_CTRL_BE_BK;
680*5113495bSYour Name 					level = DP_TH_BE_BK;
681*5113495bSYour Name 					pool->status = FLOW_POOL_BE_BK_PAUSED;
682*5113495bSYour Name 					break;
683*5113495bSYour Name 				case FLOW_POOL_BE_BK_PAUSED:
684*5113495bSYour Name 					/* pause network VI queue */
685*5113495bSYour Name 					act = WLAN_NETIF_VI_QUEUE_OFF;
686*5113495bSYour Name 					reason = WLAN_DATA_FLOW_CTRL_VI;
687*5113495bSYour Name 					level = DP_TH_VI;
688*5113495bSYour Name 					pool->status = FLOW_POOL_VI_PAUSED;
689*5113495bSYour Name 					break;
690*5113495bSYour Name 				case FLOW_POOL_VI_PAUSED:
691*5113495bSYour Name 					/* pause network VO queue */
692*5113495bSYour Name 					act = WLAN_NETIF_VO_QUEUE_OFF;
693*5113495bSYour Name 					reason = WLAN_DATA_FLOW_CTRL_VO;
694*5113495bSYour Name 					level = DP_TH_VO;
695*5113495bSYour Name 					pool->status = FLOW_POOL_VO_PAUSED;
696*5113495bSYour Name 					break;
697*5113495bSYour Name 				case FLOW_POOL_VO_PAUSED:
698*5113495bSYour Name 					/* pause network HI PRI queue */
699*5113495bSYour Name 					act = WLAN_NETIF_PRIORITY_QUEUE_OFF;
700*5113495bSYour Name 					reason = WLAN_DATA_FLOW_CTRL_PRI;
701*5113495bSYour Name 					level = DP_TH_HI;
702*5113495bSYour Name 					pool->status = FLOW_POOL_ACTIVE_PAUSED;
703*5113495bSYour Name 					break;
704*5113495bSYour Name 				case FLOW_POOL_ACTIVE_PAUSED:
705*5113495bSYour Name 					act = WLAN_NETIF_ACTION_TYPE_NONE;
706*5113495bSYour Name 					break;
707*5113495bSYour Name 				default:
708*5113495bSYour Name 					dp_err_rl("pool status is %d!",
709*5113495bSYour Name 						  pool->status);
710*5113495bSYour Name 					break;
711*5113495bSYour Name 				}
712*5113495bSYour Name 
713*5113495bSYour Name 				if (act != WLAN_NETIF_ACTION_TYPE_NONE) {
714*5113495bSYour Name 					pool->latest_pause_time[level] =
715*5113495bSYour Name 						qdf_get_system_timestamp();
716*5113495bSYour Name 					soc->pause_cb(desc_pool_id,
717*5113495bSYour Name 						      act,
718*5113495bSYour Name 						      reason);
719*5113495bSYour Name 				}
720*5113495bSYour Name 			}
721*5113495bSYour Name 		} else {
722*5113495bSYour Name 			pool->pkt_drop_no_desc++;
723*5113495bSYour Name 		}
724*5113495bSYour Name 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
725*5113495bSYour Name 	} else {
726*5113495bSYour Name 		dp_err_rl("NULL desc pool pool_id %d", desc_pool_id);
727*5113495bSYour Name 		soc->pool_stats.pkt_drop_no_pool++;
728*5113495bSYour Name 	}
729*5113495bSYour Name 
730*5113495bSYour Name 	return tx_desc;
731*5113495bSYour Name }
732*5113495bSYour Name 
733*5113495bSYour Name /**
734*5113495bSYour Name  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
735*5113495bSYour Name  * @soc: Handle to DP SoC structure
736*5113495bSYour Name  * @tx_desc: the tx descriptor to be freed
737*5113495bSYour Name  * @desc_pool_id: ID of the flow control pool
738*5113495bSYour Name  *
739*5113495bSYour Name  * Return: None
740*5113495bSYour Name  */
741*5113495bSYour Name static inline void
dp_tx_desc_free(struct dp_soc * soc,struct dp_tx_desc_s * tx_desc,uint8_t desc_pool_id)742*5113495bSYour Name dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
743*5113495bSYour Name 		uint8_t desc_pool_id)
744*5113495bSYour Name {
745*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
746*5113495bSYour Name 	qdf_time_t unpause_time = qdf_get_system_timestamp(), pause_dur;
747*5113495bSYour Name 	enum netif_action_type act = WLAN_WAKE_ALL_NETIF_QUEUE;
748*5113495bSYour Name 	enum netif_reason_type reason;
749*5113495bSYour Name 
750*5113495bSYour Name 	qdf_spin_lock_bh(&pool->flow_pool_lock);
751*5113495bSYour Name 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
752*5113495bSYour Name 	tx_desc->nbuf = NULL;
753*5113495bSYour Name 	tx_desc->flags = 0;
754*5113495bSYour Name 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
755*5113495bSYour Name 	dp_tx_put_desc_flow_pool(pool, tx_desc);
756*5113495bSYour Name 	switch (pool->status) {
757*5113495bSYour Name 	case FLOW_POOL_ACTIVE_PAUSED:
758*5113495bSYour Name 		if (pool->avail_desc > pool->start_th[DP_TH_HI]) {
759*5113495bSYour Name 			act = WLAN_NETIF_PRIORITY_QUEUE_ON;
760*5113495bSYour Name 			reason = WLAN_DATA_FLOW_CTRL_PRI;
761*5113495bSYour Name 			pool->status = FLOW_POOL_VO_PAUSED;
762*5113495bSYour Name 
763*5113495bSYour Name 			/* Update maximum pause duration for HI queue */
764*5113495bSYour Name 			pause_dur = unpause_time -
765*5113495bSYour Name 					pool->latest_pause_time[DP_TH_HI];
766*5113495bSYour Name 			if (pool->max_pause_time[DP_TH_HI] < pause_dur)
767*5113495bSYour Name 				pool->max_pause_time[DP_TH_HI] = pause_dur;
768*5113495bSYour Name 		}
769*5113495bSYour Name 		break;
770*5113495bSYour Name 	case FLOW_POOL_VO_PAUSED:
771*5113495bSYour Name 		if (pool->avail_desc > pool->start_th[DP_TH_VO]) {
772*5113495bSYour Name 			act = WLAN_NETIF_VO_QUEUE_ON;
773*5113495bSYour Name 			reason = WLAN_DATA_FLOW_CTRL_VO;
774*5113495bSYour Name 			pool->status = FLOW_POOL_VI_PAUSED;
775*5113495bSYour Name 
776*5113495bSYour Name 			/* Update maximum pause duration for VO queue */
777*5113495bSYour Name 			pause_dur = unpause_time -
778*5113495bSYour Name 					pool->latest_pause_time[DP_TH_VO];
779*5113495bSYour Name 			if (pool->max_pause_time[DP_TH_VO] < pause_dur)
780*5113495bSYour Name 				pool->max_pause_time[DP_TH_VO] = pause_dur;
781*5113495bSYour Name 		}
782*5113495bSYour Name 		break;
783*5113495bSYour Name 	case FLOW_POOL_VI_PAUSED:
784*5113495bSYour Name 		if (pool->avail_desc > pool->start_th[DP_TH_VI]) {
785*5113495bSYour Name 			act = WLAN_NETIF_VI_QUEUE_ON;
786*5113495bSYour Name 			reason = WLAN_DATA_FLOW_CTRL_VI;
787*5113495bSYour Name 			pool->status = FLOW_POOL_BE_BK_PAUSED;
788*5113495bSYour Name 
789*5113495bSYour Name 			/* Update maximum pause duration for VI queue */
790*5113495bSYour Name 			pause_dur = unpause_time -
791*5113495bSYour Name 					pool->latest_pause_time[DP_TH_VI];
792*5113495bSYour Name 			if (pool->max_pause_time[DP_TH_VI] < pause_dur)
793*5113495bSYour Name 				pool->max_pause_time[DP_TH_VI] = pause_dur;
794*5113495bSYour Name 		}
795*5113495bSYour Name 		break;
796*5113495bSYour Name 	case FLOW_POOL_BE_BK_PAUSED:
797*5113495bSYour Name 		if (pool->avail_desc > pool->start_th[DP_TH_BE_BK]) {
798*5113495bSYour Name 			act = WLAN_NETIF_BE_BK_QUEUE_ON;
799*5113495bSYour Name 			reason = WLAN_DATA_FLOW_CTRL_BE_BK;
800*5113495bSYour Name 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
801*5113495bSYour Name 
802*5113495bSYour Name 			/* Update maximum pause duration for BE_BK queue */
803*5113495bSYour Name 			pause_dur = unpause_time -
804*5113495bSYour Name 					pool->latest_pause_time[DP_TH_BE_BK];
805*5113495bSYour Name 			if (pool->max_pause_time[DP_TH_BE_BK] < pause_dur)
806*5113495bSYour Name 				pool->max_pause_time[DP_TH_BE_BK] = pause_dur;
807*5113495bSYour Name 		}
808*5113495bSYour Name 		break;
809*5113495bSYour Name 	case FLOW_POOL_INVALID:
810*5113495bSYour Name 		if (pool->avail_desc == pool->pool_size) {
811*5113495bSYour Name 			dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
812*5113495bSYour Name 			dp_tx_desc_pool_free(soc, desc_pool_id, false);
813*5113495bSYour Name 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
814*5113495bSYour Name 			dp_err_rl("pool %d is freed!!", desc_pool_id);
815*5113495bSYour Name 			return;
816*5113495bSYour Name 		}
817*5113495bSYour Name 		break;
818*5113495bSYour Name 
819*5113495bSYour Name 	case FLOW_POOL_ACTIVE_UNPAUSED:
820*5113495bSYour Name 		break;
821*5113495bSYour Name 
822*5113495bSYour Name 	case FLOW_POOL_ACTIVE_UNPAUSED_REATTACH:
823*5113495bSYour Name 		fallthrough;
824*5113495bSYour Name 	default:
825*5113495bSYour Name 		dp_err_rl("pool %d status: %d",
826*5113495bSYour Name 			  desc_pool_id, pool->status);
827*5113495bSYour Name 		break;
828*5113495bSYour Name 	};
829*5113495bSYour Name 
830*5113495bSYour Name 	if (act != WLAN_WAKE_ALL_NETIF_QUEUE)
831*5113495bSYour Name 		soc->pause_cb(pool->flow_pool_id,
832*5113495bSYour Name 			      act, reason);
833*5113495bSYour Name 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
834*5113495bSYour Name }
835*5113495bSYour Name 
836*5113495bSYour Name static inline void
dp_tx_spcl_desc_free(struct dp_soc * soc,struct dp_tx_desc_s * tx_desc,uint8_t desc_pool_id)837*5113495bSYour Name dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
838*5113495bSYour Name 		     uint8_t desc_pool_id)
839*5113495bSYour Name {
840*5113495bSYour Name }
841*5113495bSYour Name 
/**
 * dp_tx_spcl_desc_alloc() - Allocate a special tx descriptor
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Stub: always returns NULL when QCA_AC_BASED_FLOW_CONTROL is enabled.
 *
 * Return: NULL
 */
static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	return NULL;
}
847*5113495bSYour Name #else /* QCA_AC_BASED_FLOW_CONTROL */
848*5113495bSYour Name 
849*5113495bSYour Name static inline bool
dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s * pool,uint16_t avail_desc)850*5113495bSYour Name dp_tx_is_threshold_reached(struct dp_tx_desc_pool_s *pool, uint16_t avail_desc)
851*5113495bSYour Name {
852*5113495bSYour Name 	if (qdf_unlikely(avail_desc < pool->stop_th))
853*5113495bSYour Name 		return true;
854*5113495bSYour Name 	else
855*5113495bSYour Name 		return false;
856*5113495bSYour Name }
857*5113495bSYour Name 
858*5113495bSYour Name /**
859*5113495bSYour Name  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
860*5113495bSYour Name  * @soc: Handle to DP SoC structure
861*5113495bSYour Name  * @desc_pool_id:
862*5113495bSYour Name  *
863*5113495bSYour Name  * Return: Tx descriptor or NULL
864*5113495bSYour Name  */
865*5113495bSYour Name static inline struct dp_tx_desc_s *
dp_tx_desc_alloc(struct dp_soc * soc,uint8_t desc_pool_id)866*5113495bSYour Name dp_tx_desc_alloc(struct dp_soc *soc, uint8_t desc_pool_id)
867*5113495bSYour Name {
868*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = NULL;
869*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
870*5113495bSYour Name 
871*5113495bSYour Name 	if (pool) {
872*5113495bSYour Name 		qdf_spin_lock_bh(&pool->flow_pool_lock);
873*5113495bSYour Name 		if (pool->status <= FLOW_POOL_ACTIVE_PAUSED &&
874*5113495bSYour Name 		    pool->avail_desc) {
875*5113495bSYour Name 			tx_desc = dp_tx_get_desc_flow_pool(pool);
876*5113495bSYour Name 			tx_desc->pool_id = desc_pool_id;
877*5113495bSYour Name 			tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
878*5113495bSYour Name 			dp_tx_desc_set_magic(tx_desc,
879*5113495bSYour Name 					     DP_TX_MAGIC_PATTERN_INUSE);
880*5113495bSYour Name 			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
881*5113495bSYour Name 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
882*5113495bSYour Name 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
883*5113495bSYour Name 				/* pause network queues */
884*5113495bSYour Name 				soc->pause_cb(desc_pool_id,
885*5113495bSYour Name 					       WLAN_STOP_ALL_NETIF_QUEUE,
886*5113495bSYour Name 					       WLAN_DATA_FLOW_CONTROL);
887*5113495bSYour Name 			} else {
888*5113495bSYour Name 				qdf_spin_unlock_bh(&pool->flow_pool_lock);
889*5113495bSYour Name 			}
890*5113495bSYour Name 		} else {
891*5113495bSYour Name 			pool->pkt_drop_no_desc++;
892*5113495bSYour Name 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
893*5113495bSYour Name 		}
894*5113495bSYour Name 	} else {
895*5113495bSYour Name 		soc->pool_stats.pkt_drop_no_pool++;
896*5113495bSYour Name 	}
897*5113495bSYour Name 
898*5113495bSYour Name 	return tx_desc;
899*5113495bSYour Name }
900*5113495bSYour Name 
/**
 * dp_tx_spcl_desc_alloc() - Allocate a special tx descriptor
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: ID of the flow control pool
 *
 * Stub for the non-AC flow-control build.
 *
 * Return: NULL
 */
static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	return NULL;
}
906*5113495bSYour Name /**
907*5113495bSYour Name  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
908*5113495bSYour Name  * @soc: Handle to DP SoC structure
909*5113495bSYour Name  * @tx_desc: Descriptor to free
910*5113495bSYour Name  * @desc_pool_id: Descriptor pool Id
911*5113495bSYour Name  *
912*5113495bSYour Name  * Return: None
913*5113495bSYour Name  */
914*5113495bSYour Name static inline void
dp_tx_desc_free(struct dp_soc * soc,struct dp_tx_desc_s * tx_desc,uint8_t desc_pool_id)915*5113495bSYour Name dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
916*5113495bSYour Name 		uint8_t desc_pool_id)
917*5113495bSYour Name {
918*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];
919*5113495bSYour Name 
920*5113495bSYour Name 	qdf_spin_lock_bh(&pool->flow_pool_lock);
921*5113495bSYour Name 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
922*5113495bSYour Name 	tx_desc->nbuf = NULL;
923*5113495bSYour Name 	tx_desc->flags = 0;
924*5113495bSYour Name 	dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
925*5113495bSYour Name 	dp_tx_put_desc_flow_pool(pool, tx_desc);
926*5113495bSYour Name 	switch (pool->status) {
927*5113495bSYour Name 	case FLOW_POOL_ACTIVE_PAUSED:
928*5113495bSYour Name 		if (pool->avail_desc > pool->start_th) {
929*5113495bSYour Name 			soc->pause_cb(pool->flow_pool_id,
930*5113495bSYour Name 				       WLAN_WAKE_ALL_NETIF_QUEUE,
931*5113495bSYour Name 				       WLAN_DATA_FLOW_CONTROL);
932*5113495bSYour Name 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
933*5113495bSYour Name 		}
934*5113495bSYour Name 		break;
935*5113495bSYour Name 	case FLOW_POOL_INVALID:
936*5113495bSYour Name 		if (pool->avail_desc == pool->pool_size) {
937*5113495bSYour Name 			dp_tx_desc_pool_deinit(soc, desc_pool_id, false);
938*5113495bSYour Name 			dp_tx_desc_pool_free(soc, desc_pool_id, false);
939*5113495bSYour Name 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
940*5113495bSYour Name 			qdf_print("%s %d pool is freed!!",
941*5113495bSYour Name 				  __func__, __LINE__);
942*5113495bSYour Name 			return;
943*5113495bSYour Name 		}
944*5113495bSYour Name 		break;
945*5113495bSYour Name 
946*5113495bSYour Name 	case FLOW_POOL_ACTIVE_UNPAUSED:
947*5113495bSYour Name 		break;
948*5113495bSYour Name 	default:
949*5113495bSYour Name 		qdf_print("%s %d pool is INACTIVE State!!",
950*5113495bSYour Name 			  __func__, __LINE__);
951*5113495bSYour Name 		break;
952*5113495bSYour Name 	};
953*5113495bSYour Name 
954*5113495bSYour Name 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
955*5113495bSYour Name }
956*5113495bSYour Name 
957*5113495bSYour Name static inline void
dp_tx_spcl_desc_free(struct dp_soc * soc,struct dp_tx_desc_s * tx_desc,uint8_t desc_pool_id)958*5113495bSYour Name dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
959*5113495bSYour Name 		     uint8_t desc_pool_id)
960*5113495bSYour Name {
961*5113495bSYour Name }
962*5113495bSYour Name #endif /* QCA_AC_BASED_FLOW_CONTROL */
963*5113495bSYour Name 
964*5113495bSYour Name static inline bool
dp_tx_desc_thresh_reached(struct cdp_soc_t * soc_hdl,uint8_t vdev_id)965*5113495bSYour Name dp_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
966*5113495bSYour Name {
967*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
968*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
969*5113495bSYour Name 						     DP_MOD_ID_CDP);
970*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool;
971*5113495bSYour Name 	bool status;
972*5113495bSYour Name 
973*5113495bSYour Name 	if (!vdev)
974*5113495bSYour Name 		return false;
975*5113495bSYour Name 
976*5113495bSYour Name 	pool = vdev->pool;
977*5113495bSYour Name 	status = dp_tx_is_threshold_reached(pool, pool->avail_desc);
978*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
979*5113495bSYour Name 
980*5113495bSYour Name 	return status;
981*5113495bSYour Name }
982*5113495bSYour Name #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
983*5113495bSYour Name 
/* No-op when QCA_LL_TX_FLOW_CONTROL_V2 is disabled. */
static inline void dp_tx_flow_control_init(struct dp_soc *handle)
{
}
987*5113495bSYour Name 
/* No-op when QCA_LL_TX_FLOW_CONTROL_V2 is disabled. */
static inline void dp_tx_flow_control_deinit(struct dp_soc *handle)
{
}
991*5113495bSYour Name 
/* Stub when QCA_LL_TX_FLOW_CONTROL_V2 is disabled: always succeeds. */
static inline QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id,
	uint32_t flow_pool_size)
{
	return QDF_STATUS_SUCCESS;
}
998*5113495bSYour Name 
/* No-op when QCA_LL_TX_FLOW_CONTROL_V2 is disabled. */
static inline void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev,
	uint8_t flow_id, uint8_t flow_type, uint8_t flow_pool_id)
{
}
1003*5113495bSYour Name 
1004*5113495bSYour Name #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
1005*5113495bSYour Name static inline
dp_tx_prefetch_desc(struct dp_tx_desc_s * tx_desc)1006*5113495bSYour Name void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
1007*5113495bSYour Name {
1008*5113495bSYour Name 	if (tx_desc)
1009*5113495bSYour Name 		prefetch(tx_desc);
1010*5113495bSYour Name }
1011*5113495bSYour Name #else
1012*5113495bSYour Name static inline
dp_tx_prefetch_desc(struct dp_tx_desc_s * tx_desc)1013*5113495bSYour Name void dp_tx_prefetch_desc(struct dp_tx_desc_s *tx_desc)
1014*5113495bSYour Name {
1015*5113495bSYour Name }
1016*5113495bSYour Name #endif
1017*5113495bSYour Name 
1018*5113495bSYour Name /**
1019*5113495bSYour Name  * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
1020*5113495bSYour Name  * @soc: Handle to DP SoC structure
1021*5113495bSYour Name  * @desc_pool_id: pool id
1022*5113495bSYour Name  *
1023*5113495bSYour Name  * Return: Tx Descriptor or NULL
1024*5113495bSYour Name  */
dp_tx_desc_alloc(struct dp_soc * soc,uint8_t desc_pool_id)1025*5113495bSYour Name static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
1026*5113495bSYour Name 						uint8_t desc_pool_id)
1027*5113495bSYour Name {
1028*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = NULL;
1029*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = NULL;
1030*5113495bSYour Name 
1031*5113495bSYour Name 	pool = dp_get_tx_desc_pool(soc, desc_pool_id);
1032*5113495bSYour Name 
1033*5113495bSYour Name 	TX_DESC_LOCK_LOCK(&pool->lock);
1034*5113495bSYour Name 
1035*5113495bSYour Name 	tx_desc = pool->freelist;
1036*5113495bSYour Name 
1037*5113495bSYour Name 	/* Pool is exhausted */
1038*5113495bSYour Name 	if (!tx_desc) {
1039*5113495bSYour Name 		TX_DESC_LOCK_UNLOCK(&pool->lock);
1040*5113495bSYour Name 		return NULL;
1041*5113495bSYour Name 	}
1042*5113495bSYour Name 
1043*5113495bSYour Name 	pool->freelist = pool->freelist->next;
1044*5113495bSYour Name 	pool->num_allocated++;
1045*5113495bSYour Name 	pool->num_free--;
1046*5113495bSYour Name 	dp_tx_prefetch_desc(pool->freelist);
1047*5113495bSYour Name 
1048*5113495bSYour Name 	tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
1049*5113495bSYour Name 
1050*5113495bSYour Name 	TX_DESC_LOCK_UNLOCK(&pool->lock);
1051*5113495bSYour Name 
1052*5113495bSYour Name 	return tx_desc;
1053*5113495bSYour Name }
1054*5113495bSYour Name 
/**
 * dp_tx_spcl_desc_alloc() - Allocate a descriptor from the special pool
 * @soc: Handle to DP SoC structure
 * @desc_pool_id: special pool id
 *
 * Same freelist pop as dp_tx_desc_alloc() but against the special pool,
 * and the descriptor is additionally tagged DP_TX_DESC_FLAG_SPECIAL.
 *
 * Return: Tx Descriptor or NULL when the pool is exhausted
 */
static inline struct dp_tx_desc_s *dp_tx_spcl_desc_alloc(struct dp_soc *soc,
							 uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = dp_get_spcl_tx_desc_pool(soc,
								  desc_pool_id);
	struct dp_tx_desc_s *desc;

	TX_DESC_LOCK_LOCK(&pool->lock);

	desc = pool->freelist;
	if (desc) {
		/* Detach the head and account for it */
		pool->freelist = desc->next;
		pool->num_allocated++;
		pool->num_free--;
		/* Warm the cache for the next allocation */
		dp_tx_prefetch_desc(pool->freelist);
		/* Mark allocated and flag as a special-pool descriptor */
		desc->flags = DP_TX_DESC_FLAG_ALLOCATED |
			      DP_TX_DESC_FLAG_SPECIAL;
	}

	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return desc;
}
1085*5113495bSYour Name 
1086*5113495bSYour Name /**
1087*5113495bSYour Name  * dp_tx_desc_alloc_multiple() - Allocate batch of software Tx Descriptors
1088*5113495bSYour Name  *                            from given pool
1089*5113495bSYour Name  * @soc: Handle to DP SoC structure
1090*5113495bSYour Name  * @desc_pool_id: pool id should pick up
1091*5113495bSYour Name  * @num_requested: number of required descriptor
1092*5113495bSYour Name  *
1093*5113495bSYour Name  * allocate multiple tx descriptor and make a link
1094*5113495bSYour Name  *
1095*5113495bSYour Name  * Return: first descriptor pointer or NULL
1096*5113495bSYour Name  */
dp_tx_desc_alloc_multiple(struct dp_soc * soc,uint8_t desc_pool_id,uint8_t num_requested)1097*5113495bSYour Name static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
1098*5113495bSYour Name 		struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
1099*5113495bSYour Name {
1100*5113495bSYour Name 	struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
1101*5113495bSYour Name 	uint8_t count;
1102*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = NULL;
1103*5113495bSYour Name 
1104*5113495bSYour Name 	pool = dp_get_tx_desc_pool(soc, desc_pool_id);
1105*5113495bSYour Name 
1106*5113495bSYour Name 	TX_DESC_LOCK_LOCK(&pool->lock);
1107*5113495bSYour Name 
1108*5113495bSYour Name 	if ((num_requested == 0) ||
1109*5113495bSYour Name 			(pool->num_free < num_requested)) {
1110*5113495bSYour Name 		TX_DESC_LOCK_UNLOCK(&pool->lock);
1111*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1112*5113495bSYour Name 			"%s, No Free Desc: Available(%d) num_requested(%d)",
1113*5113495bSYour Name 			__func__, pool->num_free,
1114*5113495bSYour Name 			num_requested);
1115*5113495bSYour Name 		return NULL;
1116*5113495bSYour Name 	}
1117*5113495bSYour Name 
1118*5113495bSYour Name 	h_desc = pool->freelist;
1119*5113495bSYour Name 
1120*5113495bSYour Name 	/* h_desc should never be NULL since num_free > requested */
1121*5113495bSYour Name 	qdf_assert_always(h_desc);
1122*5113495bSYour Name 
1123*5113495bSYour Name 	c_desc = h_desc;
1124*5113495bSYour Name 	for (count = 0; count < (num_requested - 1); count++) {
1125*5113495bSYour Name 		c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
1126*5113495bSYour Name 		c_desc = c_desc->next;
1127*5113495bSYour Name 	}
1128*5113495bSYour Name 	pool->num_free -= count;
1129*5113495bSYour Name 	pool->num_allocated += count;
1130*5113495bSYour Name 	pool->freelist = c_desc->next;
1131*5113495bSYour Name 	c_desc->next = NULL;
1132*5113495bSYour Name 
1133*5113495bSYour Name 	TX_DESC_LOCK_UNLOCK(&pool->lock);
1134*5113495bSYour Name 	return h_desc;
1135*5113495bSYour Name }
1136*5113495bSYour Name 
1137*5113495bSYour Name /**
1138*5113495bSYour Name  * dp_tx_desc_free() - Free a tx descriptor and attach it to free list
1139*5113495bSYour Name  * @soc: Handle to DP SoC structure
1140*5113495bSYour Name  * @tx_desc: descriptor to free
1141*5113495bSYour Name  * @desc_pool_id: ID of the free pool
1142*5113495bSYour Name  */
1143*5113495bSYour Name static inline void
dp_tx_desc_free(struct dp_soc * soc,struct dp_tx_desc_s * tx_desc,uint8_t desc_pool_id)1144*5113495bSYour Name dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
1145*5113495bSYour Name 		uint8_t desc_pool_id)
1146*5113495bSYour Name {
1147*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = NULL;
1148*5113495bSYour Name 
1149*5113495bSYour Name 	dp_tx_desc_clear(tx_desc);
1150*5113495bSYour Name 	pool = dp_get_tx_desc_pool(soc, desc_pool_id);
1151*5113495bSYour Name 	TX_DESC_LOCK_LOCK(&pool->lock);
1152*5113495bSYour Name 	tx_desc->next = pool->freelist;
1153*5113495bSYour Name 	pool->freelist = tx_desc;
1154*5113495bSYour Name 	pool->num_allocated--;
1155*5113495bSYour Name 	pool->num_free++;
1156*5113495bSYour Name 	TX_DESC_LOCK_UNLOCK(&pool->lock);
1157*5113495bSYour Name }
1158*5113495bSYour Name 
1159*5113495bSYour Name static inline void
dp_tx_spcl_desc_free(struct dp_soc * soc,struct dp_tx_desc_s * tx_desc,uint8_t desc_pool_id)1160*5113495bSYour Name dp_tx_spcl_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
1161*5113495bSYour Name 		     uint8_t desc_pool_id)
1162*5113495bSYour Name {
1163*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = NULL;
1164*5113495bSYour Name 
1165*5113495bSYour Name 	dp_tx_desc_clear(tx_desc);
1166*5113495bSYour Name 
1167*5113495bSYour Name 	pool = dp_get_spcl_tx_desc_pool(soc, desc_pool_id);
1168*5113495bSYour Name 	TX_DESC_LOCK_LOCK(&pool->lock);
1169*5113495bSYour Name 	tx_desc->next = pool->freelist;
1170*5113495bSYour Name 	pool->freelist = tx_desc;
1171*5113495bSYour Name 	pool->num_allocated--;
1172*5113495bSYour Name 	pool->num_free++;
1173*5113495bSYour Name 	TX_DESC_LOCK_UNLOCK(&pool->lock);
1174*5113495bSYour Name }
1175*5113495bSYour Name 
1176*5113495bSYour Name static inline void
dp_tx_desc_free_list(struct dp_tx_desc_pool_s * pool,struct dp_tx_desc_s * head_desc,struct dp_tx_desc_s * tail_desc,uint32_t fast_desc_count)1177*5113495bSYour Name dp_tx_desc_free_list(struct dp_tx_desc_pool_s *pool,
1178*5113495bSYour Name 		     struct dp_tx_desc_s *head_desc,
1179*5113495bSYour Name 		     struct dp_tx_desc_s *tail_desc,
1180*5113495bSYour Name 		     uint32_t fast_desc_count)
1181*5113495bSYour Name {
1182*5113495bSYour Name 	TX_DESC_LOCK_LOCK(&pool->lock);
1183*5113495bSYour Name 	pool->num_allocated -= fast_desc_count;
1184*5113495bSYour Name 	pool->num_free += fast_desc_count;
1185*5113495bSYour Name 	tail_desc->next = pool->freelist;
1186*5113495bSYour Name 	pool->freelist = head_desc;
1187*5113495bSYour Name 	TX_DESC_LOCK_UNLOCK(&pool->lock);
1188*5113495bSYour Name }
1189*5113495bSYour Name 
1190*5113495bSYour Name #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
1191*5113495bSYour Name 
1192*5113495bSYour Name #ifdef QCA_DP_TX_DESC_ID_CHECK
1193*5113495bSYour Name /**
1194*5113495bSYour Name  * dp_tx_is_desc_id_valid() - check is the tx desc id valid
1195*5113495bSYour Name  * @soc: Handle to DP SoC structure
1196*5113495bSYour Name  * @tx_desc_id:
1197*5113495bSYour Name  *
1198*5113495bSYour Name  * Return: true or false
1199*5113495bSYour Name  */
1200*5113495bSYour Name static inline bool
dp_tx_is_desc_id_valid(struct dp_soc * soc,uint32_t tx_desc_id)1201*5113495bSYour Name dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
1202*5113495bSYour Name {
1203*5113495bSYour Name 	uint8_t pool_id;
1204*5113495bSYour Name 	uint16_t page_id, offset;
1205*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool;
1206*5113495bSYour Name 
1207*5113495bSYour Name 	pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
1208*5113495bSYour Name 			DP_TX_DESC_ID_POOL_OS;
1209*5113495bSYour Name 	/* Pool ID is out of limit */
1210*5113495bSYour Name 	if (pool_id > wlan_cfg_get_num_tx_desc_pool(
1211*5113495bSYour Name 				soc->wlan_cfg_ctx)) {
1212*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP,
1213*5113495bSYour Name 			  QDF_TRACE_LEVEL_FATAL,
1214*5113495bSYour Name 			  "%s:Tx Comp pool id %d not valid",
1215*5113495bSYour Name 			  __func__,
1216*5113495bSYour Name 			  pool_id);
1217*5113495bSYour Name 		goto warn_exit;
1218*5113495bSYour Name 	}
1219*5113495bSYour Name 
1220*5113495bSYour Name 	pool = &soc->tx_desc[pool_id];
1221*5113495bSYour Name 	/* the pool is freed */
1222*5113495bSYour Name 	if (IS_TX_DESC_POOL_STATUS_INACTIVE(pool)) {
1223*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP,
1224*5113495bSYour Name 			  QDF_TRACE_LEVEL_FATAL,
1225*5113495bSYour Name 			  "%s:the pool %d has been freed",
1226*5113495bSYour Name 			  __func__,
1227*5113495bSYour Name 			  pool_id);
1228*5113495bSYour Name 		goto warn_exit;
1229*5113495bSYour Name 	}
1230*5113495bSYour Name 
1231*5113495bSYour Name 	page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
1232*5113495bSYour Name 				DP_TX_DESC_ID_PAGE_OS;
1233*5113495bSYour Name 	/* the page id is out of limit */
1234*5113495bSYour Name 	if (page_id >= pool->desc_pages.num_pages) {
1235*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP,
1236*5113495bSYour Name 			  QDF_TRACE_LEVEL_FATAL,
1237*5113495bSYour Name 			  "%s:the page id %d invalid, pool id %d, num_page %d",
1238*5113495bSYour Name 			  __func__,
1239*5113495bSYour Name 			  page_id,
1240*5113495bSYour Name 			  pool_id,
1241*5113495bSYour Name 			  pool->desc_pages.num_pages);
1242*5113495bSYour Name 		goto warn_exit;
1243*5113495bSYour Name 	}
1244*5113495bSYour Name 
1245*5113495bSYour Name 	offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
1246*5113495bSYour Name 				DP_TX_DESC_ID_OFFSET_OS;
1247*5113495bSYour Name 	/* the offset is out of limit */
1248*5113495bSYour Name 	if (offset >= pool->desc_pages.num_element_per_page) {
1249*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP,
1250*5113495bSYour Name 			  QDF_TRACE_LEVEL_FATAL,
1251*5113495bSYour Name 			  "%s:offset %d invalid, pool%d,num_elem_per_page %d",
1252*5113495bSYour Name 			  __func__,
1253*5113495bSYour Name 			  offset,
1254*5113495bSYour Name 			  pool_id,
1255*5113495bSYour Name 			  pool->desc_pages.num_element_per_page);
1256*5113495bSYour Name 		goto warn_exit;
1257*5113495bSYour Name 	}
1258*5113495bSYour Name 
1259*5113495bSYour Name 	return true;
1260*5113495bSYour Name 
1261*5113495bSYour Name warn_exit:
1262*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_DP,
1263*5113495bSYour Name 		  QDF_TRACE_LEVEL_FATAL,
1264*5113495bSYour Name 		  "%s:Tx desc id 0x%x not valid",
1265*5113495bSYour Name 		  __func__,
1266*5113495bSYour Name 		  tx_desc_id);
1267*5113495bSYour Name 	qdf_assert_always(0);
1268*5113495bSYour Name 	return false;
1269*5113495bSYour Name }
1270*5113495bSYour Name 
1271*5113495bSYour Name #else
static inline bool
dp_tx_is_desc_id_valid(struct dp_soc *soc, uint32_t tx_desc_id)
{
	/* QCA_DP_TX_DESC_ID_CHECK disabled: accept every descriptor id */
	return true;
}
1277*5113495bSYour Name #endif /* QCA_DP_TX_DESC_ID_CHECK */
1278*5113495bSYour Name 
1279*5113495bSYour Name #ifdef QCA_DP_TX_DESC_FAST_COMP_ENABLE
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	/*
	 * Mark the descriptor for the simple completion path only when the
	 * caller allows it and the frame is not destined to FW.
	 */
	if (qdf_likely(allow_fast_comp) &&
	    qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_TO_FW)))
		desc->flags |= DP_TX_DESC_FLAG_SIMPLE;

	/* Recycler-origin fast-xmit nbufs additionally take the FAST path */
	if (qdf_likely(desc->nbuf->fast_xmit) &&
	    qdf_likely(desc->nbuf->is_from_recycler))
		desc->flags |= DP_TX_DESC_FLAG_FAST;
}
1292*5113495bSYour Name 
1293*5113495bSYour Name #else
static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
						    struct dp_tx_desc_s *desc,
						    uint8_t allow_fast_comp)
{
	/* QCA_DP_TX_DESC_FAST_COMP_ENABLE disabled: nothing to mark */
}
1299*5113495bSYour Name #endif /* QCA_DP_TX_DESC_FAST_COMP_ENABLE */
1300*5113495bSYour Name 
1301*5113495bSYour Name /**
1302*5113495bSYour Name  * dp_tx_desc_find() - find dp tx descriptor from pool/page/offset
1303*5113495bSYour Name  * @soc: handle for the device sending the data
1304*5113495bSYour Name  * @pool_id: pool id
1305*5113495bSYour Name  * @page_id: page id
1306*5113495bSYour Name  * @offset: offset from base address
1307*5113495bSYour Name  * @spcl_pool: bit to indicate if this is a special pool
1308*5113495bSYour Name  *
1309*5113495bSYour Name  * Use page and offset to find the corresponding descriptor object in
1310*5113495bSYour Name  * the given descriptor pool.
1311*5113495bSYour Name  *
1312*5113495bSYour Name  * Return: the descriptor object that has the specified ID
1313*5113495bSYour Name  */
1314*5113495bSYour Name static inline
dp_tx_desc_find(struct dp_soc * soc,uint8_t pool_id,uint16_t page_id,uint16_t offset,bool spcl_pool)1315*5113495bSYour Name struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
1316*5113495bSYour Name 				     uint8_t pool_id, uint16_t page_id,
1317*5113495bSYour Name 				     uint16_t offset, bool spcl_pool)
1318*5113495bSYour Name {
1319*5113495bSYour Name 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
1320*5113495bSYour Name 
1321*5113495bSYour Name 	tx_desc_pool = spcl_pool ? dp_get_spcl_tx_desc_pool(soc, pool_id) :
1322*5113495bSYour Name 				dp_get_tx_desc_pool(soc, pool_id);
1323*5113495bSYour Name 
1324*5113495bSYour Name 	return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
1325*5113495bSYour Name 		tx_desc_pool->elem_size * offset;
1326*5113495bSYour Name }
1327*5113495bSYour Name 
1328*5113495bSYour Name /**
1329*5113495bSYour Name  * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
1330*5113495bSYour Name  * @soc: handle for the device sending the data
1331*5113495bSYour Name  * @desc_pool_id: target pool id
1332*5113495bSYour Name  *
1333*5113495bSYour Name  * Return: None
1334*5113495bSYour Name  */
1335*5113495bSYour Name static inline
dp_tx_ext_desc_alloc(struct dp_soc * soc,uint8_t desc_pool_id)1336*5113495bSYour Name struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
1337*5113495bSYour Name 		uint8_t desc_pool_id)
1338*5113495bSYour Name {
1339*5113495bSYour Name 	struct dp_tx_ext_desc_elem_s *c_elem;
1340*5113495bSYour Name 
1341*5113495bSYour Name 	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
1342*5113495bSYour Name 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1343*5113495bSYour Name 	if (soc->tx_ext_desc[desc_pool_id].num_free <= 0) {
1344*5113495bSYour Name 		qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1345*5113495bSYour Name 		return NULL;
1346*5113495bSYour Name 	}
1347*5113495bSYour Name 	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
1348*5113495bSYour Name 	soc->tx_ext_desc[desc_pool_id].freelist =
1349*5113495bSYour Name 		soc->tx_ext_desc[desc_pool_id].freelist->next;
1350*5113495bSYour Name 	soc->tx_ext_desc[desc_pool_id].num_free--;
1351*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1352*5113495bSYour Name 	return c_elem;
1353*5113495bSYour Name }
1354*5113495bSYour Name 
1355*5113495bSYour Name /**
1356*5113495bSYour Name  * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
1357*5113495bSYour Name  * @soc: handle for the device sending the data
1358*5113495bSYour Name  * @elem: ext descriptor pointer should release
1359*5113495bSYour Name  * @desc_pool_id: target pool id
1360*5113495bSYour Name  *
1361*5113495bSYour Name  * Return: None
1362*5113495bSYour Name  */
dp_tx_ext_desc_free(struct dp_soc * soc,struct dp_tx_ext_desc_elem_s * elem,uint8_t desc_pool_id)1363*5113495bSYour Name static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
1364*5113495bSYour Name 	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
1365*5113495bSYour Name {
1366*5113495bSYour Name 	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
1367*5113495bSYour Name 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1368*5113495bSYour Name 	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
1369*5113495bSYour Name 	soc->tx_ext_desc[desc_pool_id].freelist = elem;
1370*5113495bSYour Name 	soc->tx_ext_desc[desc_pool_id].num_free++;
1371*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1372*5113495bSYour Name 	return;
1373*5113495bSYour Name }
1374*5113495bSYour Name 
1375*5113495bSYour Name /**
1376*5113495bSYour Name  * dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptor and
1377*5113495bSYour Name  *                           attach it to free list
1378*5113495bSYour Name  * @soc: Handle to DP SoC structure
1379*5113495bSYour Name  * @desc_pool_id: pool id should pick up
1380*5113495bSYour Name  * @elem: tx descriptor should be freed
1381*5113495bSYour Name  * @num_free: number of descriptors should be freed
1382*5113495bSYour Name  *
1383*5113495bSYour Name  * Return: none
1384*5113495bSYour Name  */
dp_tx_ext_desc_free_multiple(struct dp_soc * soc,struct dp_tx_ext_desc_elem_s * elem,uint8_t desc_pool_id,uint8_t num_free)1385*5113495bSYour Name static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
1386*5113495bSYour Name 		struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
1387*5113495bSYour Name 		uint8_t num_free)
1388*5113495bSYour Name {
1389*5113495bSYour Name 	struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
1390*5113495bSYour Name 	uint8_t freed = num_free;
1391*5113495bSYour Name 
1392*5113495bSYour Name 	/* caller should always guarantee atleast list of num_free nodes */
1393*5113495bSYour Name 	qdf_assert_always(elem);
1394*5113495bSYour Name 
1395*5113495bSYour Name 	head = elem;
1396*5113495bSYour Name 	c_elem = head;
1397*5113495bSYour Name 	tail = head;
1398*5113495bSYour Name 	while (c_elem && freed) {
1399*5113495bSYour Name 		tail = c_elem;
1400*5113495bSYour Name 		c_elem = c_elem->next;
1401*5113495bSYour Name 		freed--;
1402*5113495bSYour Name 	}
1403*5113495bSYour Name 
1404*5113495bSYour Name 	/* caller should always guarantee atleast list of num_free nodes */
1405*5113495bSYour Name 	qdf_assert_always(tail);
1406*5113495bSYour Name 
1407*5113495bSYour Name 	desc_pool_id = dp_tx_ext_desc_pool_override(desc_pool_id);
1408*5113495bSYour Name 	qdf_spin_lock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1409*5113495bSYour Name 	tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
1410*5113495bSYour Name 	soc->tx_ext_desc[desc_pool_id].freelist = head;
1411*5113495bSYour Name 	soc->tx_ext_desc[desc_pool_id].num_free += num_free;
1412*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->tx_ext_desc[desc_pool_id].lock);
1413*5113495bSYour Name 
1414*5113495bSYour Name 	return;
1415*5113495bSYour Name }
1416*5113495bSYour Name 
1417*5113495bSYour Name #if defined(FEATURE_TSO)
1418*5113495bSYour Name /**
1419*5113495bSYour Name  * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
1420*5113495bSYour Name  * @soc: device soc instance
1421*5113495bSYour Name  * @pool_id: pool id should pick up tso descriptor
1422*5113495bSYour Name  *
1423*5113495bSYour Name  * Allocates a TSO segment element from the free list held in
1424*5113495bSYour Name  * the soc
1425*5113495bSYour Name  *
1426*5113495bSYour Name  * Return: tso_seg, tso segment memory pointer
1427*5113495bSYour Name  */
dp_tx_tso_desc_alloc(struct dp_soc * soc,uint8_t pool_id)1428*5113495bSYour Name static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
1429*5113495bSYour Name 		struct dp_soc *soc, uint8_t pool_id)
1430*5113495bSYour Name {
1431*5113495bSYour Name 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
1432*5113495bSYour Name 
1433*5113495bSYour Name 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1434*5113495bSYour Name 	if (soc->tx_tso_desc[pool_id].freelist) {
1435*5113495bSYour Name 		soc->tx_tso_desc[pool_id].num_free--;
1436*5113495bSYour Name 		tso_seg = soc->tx_tso_desc[pool_id].freelist;
1437*5113495bSYour Name 		soc->tx_tso_desc[pool_id].freelist =
1438*5113495bSYour Name 			soc->tx_tso_desc[pool_id].freelist->next;
1439*5113495bSYour Name 	}
1440*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1441*5113495bSYour Name 
1442*5113495bSYour Name 	return tso_seg;
1443*5113495bSYour Name }
1444*5113495bSYour Name 
1445*5113495bSYour Name /**
1446*5113495bSYour Name  * dp_tx_tso_desc_free() - function to free a TSO segment
1447*5113495bSYour Name  * @soc: device soc instance
1448*5113495bSYour Name  * @pool_id: pool id should pick up tso descriptor
1449*5113495bSYour Name  * @tso_seg: tso segment memory pointer
1450*5113495bSYour Name  *
1451*5113495bSYour Name  * Returns a TSO segment element to the free list held in the
1452*5113495bSYour Name  * HTT pdev
1453*5113495bSYour Name  *
1454*5113495bSYour Name  * Return: none
1455*5113495bSYour Name  */
dp_tx_tso_desc_free(struct dp_soc * soc,uint8_t pool_id,struct qdf_tso_seg_elem_t * tso_seg)1456*5113495bSYour Name static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
1457*5113495bSYour Name 		uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
1458*5113495bSYour Name {
1459*5113495bSYour Name 	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
1460*5113495bSYour Name 	tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
1461*5113495bSYour Name 	soc->tx_tso_desc[pool_id].freelist = tso_seg;
1462*5113495bSYour Name 	soc->tx_tso_desc[pool_id].num_free++;
1463*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
1464*5113495bSYour Name }
1465*5113495bSYour Name 
1466*5113495bSYour Name static inline
dp_tso_num_seg_alloc(struct dp_soc * soc,uint8_t pool_id)1467*5113495bSYour Name struct qdf_tso_num_seg_elem_t  *dp_tso_num_seg_alloc(struct dp_soc *soc,
1468*5113495bSYour Name 		uint8_t pool_id)
1469*5113495bSYour Name {
1470*5113495bSYour Name 	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
1471*5113495bSYour Name 
1472*5113495bSYour Name 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1473*5113495bSYour Name 	if (soc->tx_tso_num_seg[pool_id].freelist) {
1474*5113495bSYour Name 		soc->tx_tso_num_seg[pool_id].num_free--;
1475*5113495bSYour Name 		tso_num_seg = soc->tx_tso_num_seg[pool_id].freelist;
1476*5113495bSYour Name 		soc->tx_tso_num_seg[pool_id].freelist =
1477*5113495bSYour Name 			soc->tx_tso_num_seg[pool_id].freelist->next;
1478*5113495bSYour Name 	}
1479*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1480*5113495bSYour Name 
1481*5113495bSYour Name 	return tso_num_seg;
1482*5113495bSYour Name }
1483*5113495bSYour Name 
1484*5113495bSYour Name static inline
dp_tso_num_seg_free(struct dp_soc * soc,uint8_t pool_id,struct qdf_tso_num_seg_elem_t * tso_num_seg)1485*5113495bSYour Name void dp_tso_num_seg_free(struct dp_soc *soc,
1486*5113495bSYour Name 		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
1487*5113495bSYour Name {
1488*5113495bSYour Name 	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1489*5113495bSYour Name 	tso_num_seg->next = soc->tx_tso_num_seg[pool_id].freelist;
1490*5113495bSYour Name 	soc->tx_tso_num_seg[pool_id].freelist = tso_num_seg;
1491*5113495bSYour Name 	soc->tx_tso_num_seg[pool_id].num_free++;
1492*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
1493*5113495bSYour Name }
1494*5113495bSYour Name #endif
1495*5113495bSYour Name 
1496*5113495bSYour Name /**
1497*5113495bSYour Name  * dp_tx_me_alloc_buf() - Alloc descriptor from me pool
1498*5113495bSYour Name  * @pdev: DP_PDEV handle for datapath
1499*5113495bSYour Name  *
1500*5113495bSYour Name  * Return: tx descriptor on success, NULL on error
1501*5113495bSYour Name  */
1502*5113495bSYour Name static inline struct dp_tx_me_buf_t*
dp_tx_me_alloc_buf(struct dp_pdev * pdev)1503*5113495bSYour Name dp_tx_me_alloc_buf(struct dp_pdev *pdev)
1504*5113495bSYour Name {
1505*5113495bSYour Name 	struct dp_tx_me_buf_t *buf = NULL;
1506*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->tx_mutex);
1507*5113495bSYour Name 	if (pdev->me_buf.freelist) {
1508*5113495bSYour Name 		buf = pdev->me_buf.freelist;
1509*5113495bSYour Name 		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
1510*5113495bSYour Name 		pdev->me_buf.buf_in_use++;
1511*5113495bSYour Name 	} else {
1512*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1513*5113495bSYour Name 				"Error allocating memory in pool");
1514*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->tx_mutex);
1515*5113495bSYour Name 		return NULL;
1516*5113495bSYour Name 	}
1517*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1518*5113495bSYour Name 	return buf;
1519*5113495bSYour Name }
1520*5113495bSYour Name 
1521*5113495bSYour Name /**
1522*5113495bSYour Name  * dp_tx_me_free_buf() - Unmap the buffer holding the dest
1523*5113495bSYour Name  * address, free me descriptor and add it to the free-pool
1524*5113495bSYour Name  * @pdev: DP_PDEV handle for datapath
1525*5113495bSYour Name  * @buf : Allocated ME BUF
1526*5113495bSYour Name  *
1527*5113495bSYour Name  * Return:void
1528*5113495bSYour Name  */
1529*5113495bSYour Name static inline void
dp_tx_me_free_buf(struct dp_pdev * pdev,struct dp_tx_me_buf_t * buf)1530*5113495bSYour Name dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
1531*5113495bSYour Name {
1532*5113495bSYour Name 	/*
1533*5113495bSYour Name 	 * If the buf containing mac address was mapped,
1534*5113495bSYour Name 	 * it must be unmapped before freeing the me_buf.
1535*5113495bSYour Name 	 * The "paddr_macbuf" member in the me_buf structure
1536*5113495bSYour Name 	 * holds the mapped physical address and it must be
1537*5113495bSYour Name 	 * set to 0 after unmapping.
1538*5113495bSYour Name 	 */
1539*5113495bSYour Name 	if (buf->paddr_macbuf) {
1540*5113495bSYour Name 		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
1541*5113495bSYour Name 					    buf->paddr_macbuf,
1542*5113495bSYour Name 					    QDF_DMA_TO_DEVICE,
1543*5113495bSYour Name 					    QDF_MAC_ADDR_SIZE);
1544*5113495bSYour Name 		buf->paddr_macbuf = 0;
1545*5113495bSYour Name 	}
1546*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->tx_mutex);
1547*5113495bSYour Name 	buf->next = pdev->me_buf.freelist;
1548*5113495bSYour Name 	pdev->me_buf.freelist = buf;
1549*5113495bSYour Name 	pdev->me_buf.buf_in_use--;
1550*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->tx_mutex);
1551*5113495bSYour Name }
1552*5113495bSYour Name #endif /* DP_TX_DESC_H */
1553