xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_tx_queue.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  *
4*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
5*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
6*5113495bSYour Name  * above copyright notice and this permission notice appear in all
7*5113495bSYour Name  * copies.
8*5113495bSYour Name  *
9*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
17*5113495bSYour Name  */
18*5113495bSYour Name 
19*5113495bSYour Name /**
20*5113495bSYour Name  * @file ol_tx_queue.h
21*5113495bSYour Name  * @brief API definitions for the tx frame queue module within the data SW.
22*5113495bSYour Name  */
23*5113495bSYour Name #ifndef _OL_TX_QUEUE__H_
24*5113495bSYour Name #define _OL_TX_QUEUE__H_
25*5113495bSYour Name 
26*5113495bSYour Name #include <qdf_nbuf.h>           /* qdf_nbuf_t */
27*5113495bSYour Name #include <cdp_txrx_cmn.h>       /* ol_txrx_vdev_t, etc. */
28*5113495bSYour Name #include <qdf_types.h>          /* bool */
29*5113495bSYour Name 
30*5113495bSYour Name /*--- function prototypes for optional queue log feature --------------------*/
31*5113495bSYour Name #if defined(ENABLE_TX_QUEUE_LOG) || \
32*5113495bSYour Name 	(defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT))
33*5113495bSYour Name 
34*5113495bSYour Name /**
35*5113495bSYour Name  * ol_tx_queue_log_enqueue() - enqueue tx queue logs
36*5113495bSYour Name  * @pdev: physical device object
37*5113495bSYour Name  * @msdu_info: tx msdu meta data
38*5113495bSYour Name  * @frms: number of frames for which logs need to be enqueued
39*5113495bSYour Name  * @bytes: number of bytes
40*5113495bSYour Name  *
41*5113495bSYour Name  *
42*5113495bSYour Name  * Return: None
43*5113495bSYour Name  */
44*5113495bSYour Name void
45*5113495bSYour Name ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
46*5113495bSYour Name 			struct ol_txrx_msdu_info_t *msdu_info,
47*5113495bSYour Name 			int frms, int bytes);
48*5113495bSYour Name 
49*5113495bSYour Name /**
50*5113495bSYour Name  * ol_tx_queue_log_dequeue() - dequeue tx queue logs
51*5113495bSYour Name  * @pdev: physical device object
52*5113495bSYour Name  * @txq: tx queue
53*5113495bSYour Name  * @frms: number of frames for which logs need to be dequeued
54*5113495bSYour Name  * @bytes: number of bytes
55*5113495bSYour Name  *
56*5113495bSYour Name  *
57*5113495bSYour Name  * Return: None
58*5113495bSYour Name  */
59*5113495bSYour Name void
60*5113495bSYour Name ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
61*5113495bSYour Name 			struct ol_tx_frms_queue_t *txq, int frms, int bytes);
62*5113495bSYour Name 
63*5113495bSYour Name /**
64*5113495bSYour Name  * ol_tx_queue_log_free() - free tx queue logs
65*5113495bSYour Name  * @pdev: physical device object
66*5113495bSYour Name  * @txq: tx queue
67*5113495bSYour Name  * @tid: tid value
68*5113495bSYour Name  * @frms: number of frames for which logs need to be freed
69*5113495bSYour Name  * @bytes: number of bytes
 * @is_peer_txq: peer queue or not
71*5113495bSYour Name  *
72*5113495bSYour Name  *
73*5113495bSYour Name  * Return: None
74*5113495bSYour Name  */
75*5113495bSYour Name void
76*5113495bSYour Name ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
77*5113495bSYour Name 		     struct ol_tx_frms_queue_t *txq,
78*5113495bSYour Name 		     int tid, int frms, int bytes, bool is_peer_txq);
79*5113495bSYour Name 
80*5113495bSYour Name #else
81*5113495bSYour Name 
/* no-op stub: tx queue logging is compiled out in this configuration */
static inline void
ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_msdu_info_t *msdu_info,
			int frms, int bytes)
{
}
88*5113495bSYour Name 
/* no-op stub: tx queue logging is compiled out in this configuration */
static inline void
ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
			struct ol_tx_frms_queue_t *txq, int frms, int bytes)
{
}
94*5113495bSYour Name 
/* no-op stub: tx queue logging is compiled out in this configuration */
static inline void
ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
		     struct ol_tx_frms_queue_t *txq,
		     int tid, int frms, int bytes, bool is_peer_txq)
{
}
101*5113495bSYour Name 
102*5113495bSYour Name #endif
103*5113495bSYour Name 
104*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT)
105*5113495bSYour Name 
106*5113495bSYour Name /**
107*5113495bSYour Name  * @brief Queue a tx frame to the tid queue.
108*5113495bSYour Name  *
109*5113495bSYour Name  * @param pdev - the data virtual device sending the data
110*5113495bSYour Name  *      (for storing the tx desc in the virtual dev's tx_target_list,
111*5113495bSYour Name  *      and for accessing the phy dev)
112*5113495bSYour Name  * @param txq - which queue the tx frame gets stored in
113*5113495bSYour Name  * @param tx_desc - tx meta-data, including prev and next ptrs
114*5113495bSYour Name  * @param tx_msdu_info - characteristics of the tx frame
115*5113495bSYour Name  */
116*5113495bSYour Name void
117*5113495bSYour Name ol_tx_enqueue(
118*5113495bSYour Name 		struct ol_txrx_pdev_t *pdev,
119*5113495bSYour Name 		struct ol_tx_frms_queue_t *txq,
120*5113495bSYour Name 		struct ol_tx_desc_t *tx_desc,
121*5113495bSYour Name 		struct ol_txrx_msdu_info_t *tx_msdu_info);
122*5113495bSYour Name 
123*5113495bSYour Name /**
124*5113495bSYour Name  * @brief - remove the specified number of frames from the head of a tx queue
125*5113495bSYour Name  * @details
126*5113495bSYour Name  *  This function removes frames from the head of a tx queue,
127*5113495bSYour Name  *  and returns them as a NULL-terminated linked list.
128*5113495bSYour Name  *  The function will remove frames until one of the following happens:
129*5113495bSYour Name  *  1.  The tx queue is empty
130*5113495bSYour Name  *  2.  The specified number of frames have been removed
131*5113495bSYour Name  *  3.  Removal of more frames would exceed the specified credit limit
132*5113495bSYour Name  *
133*5113495bSYour Name  * @param pdev - the physical device object
134*5113495bSYour Name  * @param txq - which tx queue to remove frames from
135*5113495bSYour Name  * @param head - which contains return linked-list of tx frames (descriptors)
136*5113495bSYour Name  * @param num_frames - maximum number of frames to remove
137*5113495bSYour Name  * @param[in/out] credit -
138*5113495bSYour Name  *     input:  max credit the dequeued frames can consume
139*5113495bSYour Name  *     output: how much credit the dequeued frames consume
140*5113495bSYour Name  * @param[out] bytes - the sum of the sizes of the dequeued frames
141*5113495bSYour Name  * @return number of frames dequeued
142*5113495bSYour Name  */
143*5113495bSYour Name u_int16_t
144*5113495bSYour Name ol_tx_dequeue(
145*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
146*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
147*5113495bSYour Name 	ol_tx_desc_list *head,
148*5113495bSYour Name 	u_int16_t num_frames,
149*5113495bSYour Name 	u_int32_t *credit,
150*5113495bSYour Name 	int *bytes);
151*5113495bSYour Name 
152*5113495bSYour Name /**
 * @brief - free all frames from the tx queue during deletion
154*5113495bSYour Name  * @details
 *  This function frees all frames from the tx queue.
156*5113495bSYour Name  *  This function is called during peer or vdev deletion.
157*5113495bSYour Name  *  This function notifies the scheduler, so the scheduler can update
158*5113495bSYour Name  *  its state to account for the absence of the queue.
159*5113495bSYour Name  *
160*5113495bSYour Name  * @param pdev - the physical device object, which stores the txqs
161*5113495bSYour Name  * @param txq - which tx queue to free frames from
162*5113495bSYour Name  * @param tid - the extended TID that the queue belongs to
163*5113495bSYour Name  * @param is_peer_txq - peer queue or not
164*5113495bSYour Name  */
165*5113495bSYour Name void
166*5113495bSYour Name ol_tx_queue_free(
167*5113495bSYour Name 		struct ol_txrx_pdev_t *pdev,
168*5113495bSYour Name 		struct ol_tx_frms_queue_t *txq,
169*5113495bSYour Name 		int tid, bool is_peer_txq);
170*5113495bSYour Name 
171*5113495bSYour Name /**
172*5113495bSYour Name  * @brief - discard pending tx frames from the tx queue
173*5113495bSYour Name  * @details
174*5113495bSYour Name  *  This function is called if there are too many queues in tx scheduler.
 *  This function is also called when all pending tx queues in the
 *  tx scheduler need to be flushed.
177*5113495bSYour Name  *
178*5113495bSYour Name  * @param pdev - the physical device object, which stores the txqs
179*5113495bSYour Name  * @param flush_all - flush all pending tx queues if set to true
180*5113495bSYour Name  * @param tx_descs - List Of tx_descs to be discarded will be returned by this
181*5113495bSYour Name  *                   function
182*5113495bSYour Name  */
183*5113495bSYour Name 
184*5113495bSYour Name void
185*5113495bSYour Name ol_tx_queue_discard(
186*5113495bSYour Name 		struct ol_txrx_pdev_t *pdev,
187*5113495bSYour Name 		bool flush_all,
188*5113495bSYour Name 		ol_tx_desc_list *tx_descs);
189*5113495bSYour Name 
190*5113495bSYour Name #else
191*5113495bSYour Name 
/* no-op stub: tx frame queuing only exists for high-latency (HL) targets */
static inline void
ol_tx_enqueue(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		struct ol_tx_desc_t *tx_desc,
		struct ol_txrx_msdu_info_t *tx_msdu_info)
{
}
200*5113495bSYour Name 
/* stub for non-HL targets: nothing is ever queued, so zero frames are
 * dequeued and *credit / *bytes are intentionally left untouched
 */
static inline u_int16_t
ol_tx_dequeue(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	ol_tx_desc_list *head,
	u_int16_t num_frames,
	u_int32_t *credit,
	int *bytes)
{
	return 0;
}
212*5113495bSYour Name 
/* no-op stub: tx frame queuing only exists for high-latency (HL) targets */
static inline void
ol_tx_queue_free(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		int tid, bool is_peer_txq)
{
}
220*5113495bSYour Name 
/* no-op stub: tx frame queuing only exists for high-latency (HL) targets */
static inline void
ol_tx_queue_discard(
		struct ol_txrx_pdev_t *pdev,
		bool flush_all,
		ol_tx_desc_list *tx_descs)
{
}
228*5113495bSYour Name #endif /* defined(CONFIG_HL_SUPPORT) */
229*5113495bSYour Name 
230*5113495bSYour Name #if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
/* no-op stub: no per-vdev tx queues to flush in this configuration */
static inline
void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
235*5113495bSYour Name #else
236*5113495bSYour Name /**
237*5113495bSYour Name  * ol_txrx_vdev_flush() - Drop all tx data for the specified virtual device
238*5113495bSYour Name  * @soc_hdl: soc handle
239*5113495bSYour Name  * @vdev_id: vdev id
240*5113495bSYour Name  *
241*5113495bSYour Name  * Returns: none
242*5113495bSYour Name  *
243*5113495bSYour Name  * This function applies primarily to HL systems, but also applies to
244*5113495bSYour Name  * LL systems that use per-vdev tx queues for MCC or thermal throttling.
245*5113495bSYour Name  * This function would typically be used by the ctrl SW after it parks
246*5113495bSYour Name  * a STA vdev and then resumes it, but to a new AP.  In this case, though
247*5113495bSYour Name  * the same vdev can be used, any old tx frames queued inside it would be
248*5113495bSYour Name  * stale, and would need to be discarded.
249*5113495bSYour Name  */
250*5113495bSYour Name void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
251*5113495bSYour Name #endif
252*5113495bSYour Name 
253*5113495bSYour Name #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
254*5113495bSYour Name    (defined(QCA_LL_TX_FLOW_CONTROL_V2)) || \
255*5113495bSYour Name    defined(CONFIG_HL_SUPPORT)
256*5113495bSYour Name /**
 * ol_txrx_vdev_pause() - Suspend all tx data for the specified virtual device
 * @soc_hdl: Datapath soc handle
259*5113495bSYour Name  * @vdev_id: id of vdev
260*5113495bSYour Name  * @reason: the reason for which vdev queue is getting paused
261*5113495bSYour Name  * @pause_type: type of pause
262*5113495bSYour Name  *
263*5113495bSYour Name  * Return: none
264*5113495bSYour Name  *
265*5113495bSYour Name  * This function applies primarily to HL systems, but also
266*5113495bSYour Name  * applies to LL systems that use per-vdev tx queues for MCC or
267*5113495bSYour Name  * thermal throttling. As an example, this function could be
268*5113495bSYour Name  * used when a single-channel physical device supports multiple
269*5113495bSYour Name  * channels by jumping back and forth between the channels in a
270*5113495bSYour Name  * time-shared manner.  As the device is switched from channel A
271*5113495bSYour Name  * to channel B, the virtual devices that operate on channel A
272*5113495bSYour Name  * will be paused.
273*5113495bSYour Name  */
274*5113495bSYour Name void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
275*5113495bSYour Name 			uint32_t reason, uint32_t pause_type);
276*5113495bSYour Name 
277*5113495bSYour Name /**
 * ol_txrx_vdev_unpause() - Resume tx for the specified virtual device
 * @soc_hdl: Datapath soc handle
280*5113495bSYour Name  * @vdev_id: id of vdev being unpaused
281*5113495bSYour Name  * @reason: the reason for which vdev queue is getting unpaused
282*5113495bSYour Name  * @pause_type: type of pause
283*5113495bSYour Name  *
284*5113495bSYour Name  * Return: none
285*5113495bSYour Name  *
286*5113495bSYour Name  * This function applies primarily to HL systems, but also applies to
287*5113495bSYour Name  * LL systems that use per-vdev tx queues for MCC or thermal throttling.
288*5113495bSYour Name  */
289*5113495bSYour Name void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
290*5113495bSYour Name 			  uint32_t reason, uint32_t pause_type);
291*5113495bSYour Name #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
292*5113495bSYour Name 
293*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
294*5113495bSYour Name 
295*5113495bSYour Name void
296*5113495bSYour Name ol_txrx_peer_bal_add_limit_peer(
297*5113495bSYour Name 		struct ol_txrx_pdev_t *pdev,
298*5113495bSYour Name 		u_int16_t peer_id,
299*5113495bSYour Name 		u_int16_t peer_limit);
300*5113495bSYour Name 
301*5113495bSYour Name void
302*5113495bSYour Name ol_txrx_peer_bal_remove_limit_peer(
303*5113495bSYour Name 		struct ol_txrx_pdev_t *pdev,
304*5113495bSYour Name 		u_int16_t peer_id);
305*5113495bSYour Name 
306*5113495bSYour Name /**
307*5113495bSYour Name  * ol_txrx_peer_pause_but_no_mgmt_q() - suspend/pause all txqs except
308*5113495bSYour Name  *					management queue for a given peer
309*5113495bSYour Name  * @peer: peer device object
310*5113495bSYour Name  *
311*5113495bSYour Name  * Return: None
312*5113495bSYour Name  */
313*5113495bSYour Name void
314*5113495bSYour Name ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer);
315*5113495bSYour Name 
316*5113495bSYour Name /**
317*5113495bSYour Name  * ol_txrx_peer_unpause_but_no_mgmt_q() - unpause all txqs except management
318*5113495bSYour Name  *					  queue for a given peer
319*5113495bSYour Name  * @peer: peer device object
320*5113495bSYour Name  *
321*5113495bSYour Name  * Return: None
322*5113495bSYour Name  */
323*5113495bSYour Name void
324*5113495bSYour Name ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer);
325*5113495bSYour Name 
326*5113495bSYour Name /**
327*5113495bSYour Name  * ol_tx_bad_peer_dequeue_check() - retrieve the send limit
328*5113495bSYour Name  *				    of the tx queue category
329*5113495bSYour Name  * @txq: tx queue of the head of the category list
330*5113495bSYour Name  * @max_frames: send limit of the txq category
 * @tx_limit_flag: set to true if the tx limit is reached
332*5113495bSYour Name  *
333*5113495bSYour Name  * Return: send limit
334*5113495bSYour Name  */
335*5113495bSYour Name u_int16_t
336*5113495bSYour Name ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
337*5113495bSYour Name 			     u_int16_t max_frames,
338*5113495bSYour Name 			     u_int16_t *tx_limit_flag);
339*5113495bSYour Name 
340*5113495bSYour Name /**
341*5113495bSYour Name  * ol_tx_bad_peer_update_tx_limit() - update the send limit of the
342*5113495bSYour Name  *				      tx queue category
343*5113495bSYour Name  * @pdev: the physical device object
344*5113495bSYour Name  * @txq: tx queue of the head of the category list
345*5113495bSYour Name  * @frames: frames that has been dequeued
346*5113495bSYour Name  * @tx_limit_flag: tx limit reached flag
347*5113495bSYour Name  *
348*5113495bSYour Name  * Return: None
349*5113495bSYour Name  */
350*5113495bSYour Name void
351*5113495bSYour Name ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
352*5113495bSYour Name 			       struct ol_tx_frms_queue_t *txq,
353*5113495bSYour Name 			       u_int16_t frames,
354*5113495bSYour Name 			       u_int16_t tx_limit_flag);
355*5113495bSYour Name 
356*5113495bSYour Name /**
357*5113495bSYour Name  * ol_txrx_set_txq_peer() - set peer to the tx queue's peer
358*5113495bSYour Name  * @txq: tx queue for a given tid
359*5113495bSYour Name  * @peer: the peer device object
360*5113495bSYour Name  *
361*5113495bSYour Name  * Return: None
362*5113495bSYour Name  */
363*5113495bSYour Name void
364*5113495bSYour Name ol_txrx_set_txq_peer(
365*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
366*5113495bSYour Name 	struct ol_txrx_peer_t *peer);
367*5113495bSYour Name 
368*5113495bSYour Name /**
369*5113495bSYour Name  * @brief - initialize the peer balance context
370*5113495bSYour Name  * @param pdev - the physical device object, which stores the txqs
371*5113495bSYour Name  */
372*5113495bSYour Name void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev);
373*5113495bSYour Name 
374*5113495bSYour Name /**
375*5113495bSYour Name  * @brief - deinitialize the peer balance context
376*5113495bSYour Name  * @param pdev - the physical device object, which stores the txqs
377*5113495bSYour Name  */
378*5113495bSYour Name void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev);
379*5113495bSYour Name 
380*5113495bSYour Name #else
381*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void ol_txrx_peer_bal_add_limit_peer(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t peer_id,
		u_int16_t peer_limit)
{
}
388*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void ol_txrx_peer_bal_remove_limit_peer(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t peer_id)
{
}
394*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
{
}
398*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
{
}
402*5113495bSYour Name 
/* stub: with bad-peer flow control compiled out, never limit the dequeue;
 * *tx_limit_flag is intentionally left untouched
 */
static inline u_int16_t
ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
			     u_int16_t max_frames,
			     u_int16_t *tx_limit_flag)
{
	/* just return max_frames */
	return max_frames;
}
411*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void
ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
			       struct ol_tx_frms_queue_t *txq,
			       u_int16_t frames,
			       u_int16_t tx_limit_flag)
{
}
419*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void
ol_txrx_set_txq_peer(
		struct ol_tx_frms_queue_t *txq,
		struct ol_txrx_peer_t *peer)
{
}
426*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
{
}
430*5113495bSYour Name 
/* no-op stub: bad-peer tx flow control is compiled out */
static inline void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
{
}
434*5113495bSYour Name 
435*5113495bSYour Name #endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
436*5113495bSYour Name 
437*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
438*5113495bSYour Name 
439*5113495bSYour Name /**
440*5113495bSYour Name  * ol_tx_queue_log_sched() - start logging of tx queues for HL
441*5113495bSYour Name  * @pdev: physical device object
442*5113495bSYour Name  * @credit: number of credits
443*5113495bSYour Name  * @num_active_tids: number of active tids for which logging needs to be done
444*5113495bSYour Name  * @active_bitmap:bitmap
445*5113495bSYour Name  * @data: buffer
446*5113495bSYour Name  *
447*5113495bSYour Name  * Return: None
448*5113495bSYour Name  */
449*5113495bSYour Name void
450*5113495bSYour Name ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
451*5113495bSYour Name 		      int credit,
452*5113495bSYour Name 		      int *num_active_tids,
453*5113495bSYour Name 		      uint32_t **active_bitmap, uint8_t **data);
454*5113495bSYour Name #else
455*5113495bSYour Name 
/* no-op stub: HL tx scheduler logging is compiled out */
static inline void
ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
		      int credit,
		      int *num_active_tids,
		      uint32_t **active_bitmap, uint8_t **data)
{
}
463*5113495bSYour Name #endif /* defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING) */
464*5113495bSYour Name 
465*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT) && TXRX_DEBUG_LEVEL > 5
466*5113495bSYour Name /**
467*5113495bSYour Name  * @brief - show current state of all tx queues
468*5113495bSYour Name  * @param pdev - the physical device object, which stores the txqs
469*5113495bSYour Name  */
470*5113495bSYour Name void
471*5113495bSYour Name ol_tx_queues_display(struct ol_txrx_pdev_t *pdev);
472*5113495bSYour Name 
473*5113495bSYour Name #else
474*5113495bSYour Name 
/* no-op stub: txq display requires HL support and TXRX_DEBUG_LEVEL > 5 */
static inline void
ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
{
}
479*5113495bSYour Name #endif
480*5113495bSYour Name 
481*5113495bSYour Name #define ol_tx_queue_decs_reinit(peer, peer_id)  /* no-op */
482*5113495bSYour Name 
483*5113495bSYour Name #ifdef QCA_SUPPORT_TX_THROTTLE
484*5113495bSYour Name void ol_tx_throttle_set_level(struct cdp_soc_t *soc_hdl,
485*5113495bSYour Name 			      uint8_t pdev_id, int level);
486*5113495bSYour Name void ol_tx_throttle_init_period(struct cdp_soc_t *soc_hdl,
487*5113495bSYour Name 				uint8_t pdev_id, int period,
488*5113495bSYour Name 				uint8_t *dutycycle_level);
489*5113495bSYour Name 
490*5113495bSYour Name /**
491*5113495bSYour Name  * @brief - initialize the throttle context
492*5113495bSYour Name  * @param pdev - the physical device object, which stores the txqs
493*5113495bSYour Name  */
494*5113495bSYour Name void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev);
495*5113495bSYour Name #else
/* no-op stub: tx throttling is compiled out */
static inline void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev) {}
497*5113495bSYour Name 
/* no-op stub: tx throttling is compiled out */
static inline void ol_tx_throttle_set_level(struct cdp_soc_t *soc_hdl,
					    uint8_t pdev_id, int level)
{}
501*5113495bSYour Name 
/* no-op stub: tx throttling is compiled out */
static inline void
ol_tx_throttle_init_period(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			   int period, uint8_t *dutycycle_level)
{}
506*5113495bSYour Name #endif
507*5113495bSYour Name 
508*5113495bSYour Name #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
509*5113495bSYour Name 
/**
 * ol_tx_if_iterate_next_txq() - decide whether a txq walk should continue
 * @first: txq at which the walk started
 * @txq: txq currently reached by the walk
 *
 * Return: true while @txq has not come back around to @first
 */
static inline bool
ol_tx_if_iterate_next_txq(struct ol_tx_frms_queue_t *first,
			  struct ol_tx_frms_queue_t *txq)
{
	if (txq == first)
		return false;

	return true;
}
516*5113495bSYour Name 
517*5113495bSYour Name /**
518*5113495bSYour Name  * ol_tx_txq_group_credit_limit() - check for credit limit of a given tx queue
519*5113495bSYour Name  * @pdev: physical device object
520*5113495bSYour Name  * @txq: tx queue for which credit limit needs be to checked
521*5113495bSYour Name  * @credit: number of credits of the selected category
522*5113495bSYour Name  *
523*5113495bSYour Name  * Return: updated credits
524*5113495bSYour Name  */
525*5113495bSYour Name u_int32_t ol_tx_txq_group_credit_limit(
526*5113495bSYour Name 		struct ol_txrx_pdev_t *pdev,
527*5113495bSYour Name 		struct ol_tx_frms_queue_t *txq,
528*5113495bSYour Name 		u_int32_t credit);
529*5113495bSYour Name 
530*5113495bSYour Name /**
531*5113495bSYour Name  * ol_tx_txq_group_credit_update() - update group credits of the
 *				     selected category
533*5113495bSYour Name  * @pdev: physical device object
534*5113495bSYour Name  * @txq: tx queue for which credit needs to be updated
535*5113495bSYour Name  * @credit: number of credits by which selected category needs to be updated
536*5113495bSYour Name  * @absolute: TXQ group absolute value
537*5113495bSYour Name  *
538*5113495bSYour Name  * Return: None
539*5113495bSYour Name  */
540*5113495bSYour Name void ol_tx_txq_group_credit_update(
541*5113495bSYour Name 		struct ol_txrx_pdev_t *pdev,
542*5113495bSYour Name 		struct ol_tx_frms_queue_t *txq,
543*5113495bSYour Name 		int32_t credit,
544*5113495bSYour Name 		u_int8_t absolute);
545*5113495bSYour Name 
546*5113495bSYour Name /**
547*5113495bSYour Name  * ol_tx_set_vdev_group_ptr() - update vdev queues group pointer
548*5113495bSYour Name  * @pdev: physical device object
549*5113495bSYour Name  * @vdev_id: vdev id for which group pointer needs to update
550*5113495bSYour Name  * @grp_ptr: pointer to ol tx queue group which needs to be set for vdev queues
551*5113495bSYour Name  *
552*5113495bSYour Name  * Return: None
553*5113495bSYour Name  */
554*5113495bSYour Name void
555*5113495bSYour Name ol_tx_set_vdev_group_ptr(
556*5113495bSYour Name 		ol_txrx_pdev_handle pdev,
557*5113495bSYour Name 		u_int8_t vdev_id,
558*5113495bSYour Name 		struct ol_tx_queue_group_t *grp_ptr);
559*5113495bSYour Name 
560*5113495bSYour Name /**
561*5113495bSYour Name  * ol_tx_txq_set_group_ptr() - update tx queue group pointer
562*5113495bSYour Name  * @txq: tx queue of which group pointer needs to update
563*5113495bSYour Name  * @grp_ptr: pointer to ol tx queue group which needs to be
564*5113495bSYour Name  *	     set for given tx queue
565*5113495bSYour Name  *
566*5113495bSYour Name  *
567*5113495bSYour Name  * Return: None
568*5113495bSYour Name  */
569*5113495bSYour Name void
570*5113495bSYour Name ol_tx_txq_set_group_ptr(
571*5113495bSYour Name 		struct ol_tx_frms_queue_t *txq,
572*5113495bSYour Name 		struct ol_tx_queue_group_t *grp_ptr);
573*5113495bSYour Name 
574*5113495bSYour Name /**
575*5113495bSYour Name  * ol_tx_set_peer_group_ptr() - update peer tx queues group pointer
576*5113495bSYour Name  *				for a given tid
577*5113495bSYour Name  * @pdev: physical device object
578*5113495bSYour Name  * @peer: peer device object
579*5113495bSYour Name  * @vdev_id: vdev id
580*5113495bSYour Name  * @tid: tid for which group pointer needs to update
581*5113495bSYour Name  *
582*5113495bSYour Name  *
583*5113495bSYour Name  * Return: None
584*5113495bSYour Name  */
585*5113495bSYour Name void
586*5113495bSYour Name ol_tx_set_peer_group_ptr(
587*5113495bSYour Name 		ol_txrx_pdev_handle pdev,
588*5113495bSYour Name 		struct ol_txrx_peer_t *peer,
589*5113495bSYour Name 		u_int8_t vdev_id,
590*5113495bSYour Name 		u_int8_t tid);
591*5113495bSYour Name #else
592*5113495bSYour Name 
/**
 * ol_tx_if_iterate_next_txq() - stub tx queue iteration check
 * @first: first tx queue in the iteration (unused)
 * @txq: current tx queue (unused)
 *
 * No-op stub used when tx queue group support is compiled out.
 *
 * Return: false, so callers never continue iterating
 */
static inline bool
ol_tx_if_iterate_next_txq(struct ol_tx_frms_queue_t *first,
			  struct ol_tx_frms_queue_t *txq)
{
	/* use the bool literal rather than integer 0 for a bool return */
	return false;
}
599*5113495bSYour Name 
/**
 * ol_tx_txq_group_credit_limit() - stub group credit limit
 * @pdev: physical device object (unused)
 * @txq: tx queue being checked (unused)
 * @credit: requested credit
 *
 * With tx queue group support compiled out, no group limit applies.
 *
 * Return: @credit, unchanged
 */
static inline u_int32_t
ol_tx_txq_group_credit_limit(struct ol_txrx_pdev_t *pdev,
			     struct ol_tx_frms_queue_t *txq,
			     u_int32_t credit)
{
	return credit;
}
608*5113495bSYour Name 
/**
 * ol_tx_txq_group_credit_update() - stub group credit update
 * @pdev: physical device object (unused)
 * @txq: tx queue whose group would be updated (unused)
 * @credit: credit delta or absolute value (unused)
 * @absolute: whether @credit is absolute (unused)
 *
 * No-op when tx queue group support is compiled out.
 *
 * Return: None
 */
static inline void
ol_tx_txq_group_credit_update(struct ol_txrx_pdev_t *pdev,
			      struct ol_tx_frms_queue_t *txq,
			      int32_t credit, u_int8_t absolute)
{
}
616*5113495bSYour Name 
/**
 * ol_tx_txq_set_group_ptr() - stub tx queue group pointer update
 * @txq: tx queue (unused)
 * @grp_ptr: tx queue group pointer (unused)
 *
 * No-op when tx queue group support is compiled out.
 *
 * Return: None
 */
static inline void
ol_tx_txq_set_group_ptr(struct ol_tx_frms_queue_t *txq,
			struct ol_tx_queue_group_t *grp_ptr)
{
}
623*5113495bSYour Name 
624*5113495bSYour Name static inline void
ol_tx_set_peer_group_ptr(ol_txrx_pdev_handle pdev,struct ol_txrx_peer_t * peer,u_int8_t vdev_id,u_int8_t tid)625*5113495bSYour Name ol_tx_set_peer_group_ptr(
626*5113495bSYour Name 		ol_txrx_pdev_handle pdev,
627*5113495bSYour Name 		struct ol_txrx_peer_t *peer,
628*5113495bSYour Name 		u_int8_t vdev_id,
629*5113495bSYour Name 		u_int8_t tid)
630*5113495bSYour Name {
631*5113495bSYour Name }
632*5113495bSYour Name #endif
633*5113495bSYour Name 
634*5113495bSYour Name #if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
635*5113495bSYour Name 	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
/**
 * ol_tx_update_grp_frm_count() - update group frame count
 * @txq: the tx queue to which the frame is getting enqueued
 * @num_frms: number of frames to be added/removed from the group
 *
 * Maintains the count of frames enqueued in a particular group.
 *
 * Return: None
 */
void ol_tx_update_grp_frm_count(struct ol_tx_frms_queue_t *txq, int num_frms);

/**
 * ol_tx_txq_update_borrowed_group_credits() - update borrowed group credits
 * @pdev: physical device object
 * @txq: tx queue for which credits were consumed
 * @credits_used: number of credits consumed
 *
 * NOTE(review): semantics inferred from the name — presumably accounts for
 * credits borrowed between DBS credit-sharing groups; confirm against the
 * definition in ol_tx_queue.c.
 *
 * Return: credits used, possibly adjusted for borrowed group credits
 */
u32 ol_tx_txq_update_borrowed_group_credits(struct ol_txrx_pdev_t *pdev,
					    struct ol_tx_frms_queue_t *txq,
					    u32 credits_used);
649*5113495bSYour Name #else
/**
 * ol_tx_update_grp_frm_count() - stub group frame count update
 * @txq: tx queue (unused)
 * @num_frms: frame count delta (unused)
 *
 * No-op when DBS group credit sharing is compiled out.
 *
 * Return: None
 */
static inline void
ol_tx_update_grp_frm_count(struct ol_tx_frms_queue_t *txq, int num_frms)
{
}
653*5113495bSYour Name 
654*5113495bSYour Name static inline u32
ol_tx_txq_update_borrowed_group_credits(struct ol_txrx_pdev_t * pdev,struct ol_tx_frms_queue_t * txq,u32 credits_used)655*5113495bSYour Name ol_tx_txq_update_borrowed_group_credits(struct ol_txrx_pdev_t *pdev,
656*5113495bSYour Name 					struct ol_tx_frms_queue_t *txq,
657*5113495bSYour Name 					u32 credits_used)
658*5113495bSYour Name {
659*5113495bSYour Name 	return credits_used;
660*5113495bSYour Name }
#endif /*
	* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL &&
	* FEATURE_HL_DBS_GROUP_CREDIT_SHARING
	*/
665*5113495bSYour Name 
666*5113495bSYour Name #endif /* _OL_TX_QUEUE__H_ */
667