/*
 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file ol_tx_queue.h
 * @brief API definitions for the tx frame queue module within the data SW.
 */
#ifndef _OL_TX_QUEUE__H_
#define _OL_TX_QUEUE__H_

#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <cdp_txrx_cmn.h>       /* ol_txrx_vdev_t, etc. */
#include <qdf_types.h>          /* bool */

/*--- function prototypes for optional queue log feature --------------------*/
#if defined(ENABLE_TX_QUEUE_LOG) || \
	(defined(DEBUG_HL_LOGGING) && defined(CONFIG_HL_SUPPORT))

/**
 * ol_tx_queue_log_enqueue() - enqueue tx queue logs
 * @pdev: physical device object
 * @msdu_info: tx msdu meta data
 * @frms: number of frames for which logs need to be enqueued
 * @bytes: total size in bytes of the enqueued frames
 *
 * Return: None
 */
void
ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_msdu_info_t *msdu_info,
			int frms, int bytes);

/**
 * ol_tx_queue_log_dequeue() - dequeue tx queue logs
 * @pdev: physical device object
 * @txq: tx queue
 * @frms: number of frames for which logs need to be dequeued
 * @bytes: total size in bytes of the dequeued frames
 *
 * Return: None
 */
void
ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
			struct ol_tx_frms_queue_t *txq, int frms, int bytes);

/**
 * ol_tx_queue_log_free() - free tx queue logs
 * @pdev: physical device object
 * @txq: tx queue
 * @tid: tid value
 * @frms: number of frames for which logs need to be freed
 * @bytes: total size in bytes of the freed frames
 * @is_peer_txq: peer queue or not
 *
 * Return: None
 */
void
ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
		     struct ol_tx_frms_queue_t *txq,
		     int tid, int frms, int bytes, bool is_peer_txq);
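
/*
 * Illustrative sketch (hypothetical call sites, not taken from this
 * driver): the enqueue and dequeue paths would typically mirror each
 * frame movement into the log, e.g. for a single msdu of length
 * qdf_nbuf_len(msdu):
 *
 *	ol_tx_queue_log_enqueue(pdev, &msdu_info, 1, qdf_nbuf_len(msdu));
 *	    ... and when frms frames totalling bytes are drained ...
 *	ol_tx_queue_log_dequeue(pdev, txq, frms, bytes);
 */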

#else

static inline void
ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
			struct ol_txrx_msdu_info_t *msdu_info,
			int frms, int bytes)
{
}

static inline void
ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
			struct ol_tx_frms_queue_t *txq, int frms, int bytes)
{
}

static inline void
ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
		     struct ol_tx_frms_queue_t *txq,
		     int tid, int frms, int bytes, bool is_peer_txq)
{
}

#endif

#if defined(CONFIG_HL_SUPPORT)

/**
 * @brief Queue a tx frame to the tid queue.
 *
 * @param pdev - the data physical device sending the data
 *      (for storing the tx desc and for accessing the phy dev)
 * @param txq - which queue the tx frame gets stored in
 * @param tx_desc - tx meta-data, including prev and next ptrs
 * @param tx_msdu_info - characteristics of the tx frame
 */
void
ol_tx_enqueue(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		struct ol_tx_desc_t *tx_desc,
		struct ol_txrx_msdu_info_t *tx_msdu_info);

/**
 * @brief - remove the specified number of frames from the head of a tx queue
 * @details
 *  This function removes frames from the head of a tx queue,
 *  and returns them as a NULL-terminated linked list.
 *  The function will remove frames until one of the following happens:
 *  1.  The tx queue is empty
 *  2.  The specified number of frames have been removed
 *  3.  Removal of more frames would exceed the specified credit limit
 *
 * @param pdev - the physical device object
 * @param txq - which tx queue to remove frames from
 * @param head - list into which the removed tx frames (descriptors) are
 *      returned
 * @param num_frames - maximum number of frames to remove
 * @param[in/out] credit -
 *     input:  max credit the dequeued frames can consume
 *     output: how much credit the dequeued frames consume
 * @param[out] bytes - the sum of the sizes of the dequeued frames
 * @return number of frames dequeued
 */
u_int16_t
ol_tx_dequeue(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	ol_tx_desc_list *head,
	u_int16_t num_frames,
	u_int32_t *credit,
	int *bytes);
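
/*
 * Illustrative sketch (hypothetical HL scheduler body, not taken from
 * this driver): drain up to max_frms frames from a txq within a credit
 * budget.  On return, credit holds the credit actually consumed and the
 * dequeued descriptors are chained on head.  The TAILQ_INIT call assumes
 * ol_tx_desc_list is a TAILQ head, per its definition in ol_txrx_types.h;
 * avail_credit and max_frms are caller-tracked placeholders.
 *
 *	ol_tx_desc_list head;
 *	u_int32_t credit = avail_credit;
 *	int bytes = 0;
 *	u_int16_t n;
 *
 *	TAILQ_INIT(&head);
 *	n = ol_tx_dequeue(pdev, txq, &head, max_frms, &credit, &bytes);
 *	avail_credit -= credit;
 */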

/**
 * @brief - free all frames from the tx queue during deletion
 * @details
 *  This function frees all frames from the tx queue.
 *  This function is called during peer or vdev deletion.
 *  This function notifies the scheduler, so the scheduler can update
 *  its state to account for the absence of the queue.
 *
 * @param pdev - the physical device object, which stores the txqs
 * @param txq - which tx queue to free frames from
 * @param tid - the extended TID that the queue belongs to
 * @param is_peer_txq - peer queue or not
 */
void
ol_tx_queue_free(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		int tid, bool is_peer_txq);

/**
 * @brief - discard pending tx frames from the tx queue
 * @details
 *  This function is called when there are too many queues in the tx
 *  scheduler, or when all pending tx queues in the tx scheduler need
 *  to be flushed.
 *
 * @param pdev - the physical device object, which stores the txqs
 * @param flush_all - flush all pending tx queues if set to true
 * @param tx_descs - list of the discarded tx descriptors, returned by
 *                   this function
 */
void
ol_tx_queue_discard(
		struct ol_txrx_pdev_t *pdev,
		bool flush_all,
		ol_tx_desc_list *tx_descs);
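
/*
 * Illustrative sketch (hypothetical overflow/flush handler): collect the
 * discarded descriptors and then release them.  The release helper named
 * here, ol_tx_desc_frame_list_free() from ol_tx_desc.h, is an assumption
 * for illustration.
 *
 *	ol_tx_desc_list tx_descs;
 *
 *	TAILQ_INIT(&tx_descs);
 *	ol_tx_queue_discard(pdev, false, &tx_descs);
 *	ol_tx_desc_frame_list_free(pdev, &tx_descs, 1);
 */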

#else

static inline void
ol_tx_enqueue(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		struct ol_tx_desc_t *tx_desc,
		struct ol_txrx_msdu_info_t *tx_msdu_info)
{
}

static inline u_int16_t
ol_tx_dequeue(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	ol_tx_desc_list *head,
	u_int16_t num_frames,
	u_int32_t *credit,
	int *bytes)
{
	return 0;
}

static inline void
ol_tx_queue_free(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		int tid, bool is_peer_txq)
{
}

static inline void
ol_tx_queue_discard(
		struct ol_txrx_pdev_t *pdev,
		bool flush_all,
		ol_tx_desc_list *tx_descs)
{
}
#endif /* defined(CONFIG_HL_SUPPORT) */

#if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
static inline
void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#else
/**
 * ol_txrx_vdev_flush() - drop all tx data for the specified virtual device
 * @soc_hdl: soc handle
 * @vdev_id: vdev id
 *
 * This function applies primarily to HL systems, but also applies to
 * LL systems that use per-vdev tx queues for MCC or thermal throttling.
 * This function would typically be used by the ctrl SW after it parks
 * a STA vdev and then resumes it, but to a new AP.  In this case, though
 * the same vdev can be used, any old tx frames queued inside it would be
 * stale, and would need to be discarded.
 *
 * Return: None
 */
void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
#endif

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
   (defined(QCA_LL_TX_FLOW_CONTROL_V2)) || \
   defined(CONFIG_HL_SUPPORT)
/**
 * ol_txrx_vdev_pause() - suspend all tx data for the specified virtual device
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of vdev
 * @reason: the reason for which vdev queue is getting paused
 * @pause_type: type of pause
 *
 * This function applies primarily to HL systems, but also
 * applies to LL systems that use per-vdev tx queues for MCC or
 * thermal throttling. As an example, this function could be
 * used when a single-channel physical device supports multiple
 * channels by jumping back and forth between the channels in a
 * time-shared manner.  As the device is switched from channel A
 * to channel B, the virtual devices that operate on channel A
 * will be paused.
 *
 * Return: None
 */
void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			uint32_t reason, uint32_t pause_type);

/**
 * ol_txrx_vdev_unpause() - resume tx for the specified virtual device
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of vdev being unpaused
 * @reason: the reason for which vdev queue is getting unpaused
 * @pause_type: type of pause
 *
 * This function applies primarily to HL systems, but also applies to
 * LL systems that use per-vdev tx queues for MCC or thermal throttling.
 *
 * Return: None
 */
void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  uint32_t reason, uint32_t pause_type);
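
/*
 * Illustrative sketch (hypothetical MCC channel-switch path): pause the
 * vdevs leaving the active channel, then unpause them when their channel
 * is serviced again.  The reason and pause_type values are caller-tracked
 * placeholders.
 *
 *	ol_txrx_vdev_pause(soc, vdev_id, reason, pause_type);
 *	    ... operate on the other channel ...
 *	ol_txrx_vdev_unpause(soc, vdev_id, reason, pause_type);
 */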
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 || CONFIG_HL_SUPPORT */

#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)

/**
 * ol_txrx_peer_bal_add_limit_peer() - add a peer to the tx-limit list
 * @pdev: physical device object
 * @peer_id: ID of the peer to be rate limited
 * @peer_limit: tx frame limit to apply to the peer
 *
 * Return: None
 */
void
ol_txrx_peer_bal_add_limit_peer(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t peer_id,
		u_int16_t peer_limit);

/**
 * ol_txrx_peer_bal_remove_limit_peer() - remove a peer from the
 *					  tx-limit list
 * @pdev: physical device object
 * @peer_id: ID of the peer to remove
 *
 * Return: None
 */
void
ol_txrx_peer_bal_remove_limit_peer(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t peer_id);

/**
 * ol_txrx_peer_pause_but_no_mgmt_q() - suspend/pause all txqs except
 *					management queue for a given peer
 * @peer: peer device object
 *
 * Return: None
 */
void
ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer);

/**
 * ol_txrx_peer_unpause_but_no_mgmt_q() - unpause all txqs except management
 *					  queue for a given peer
 * @peer: peer device object
 *
 * Return: None
 */
void
ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer);

/**
 * ol_tx_bad_peer_dequeue_check() - retrieve the send limit
 *				    of the tx queue category
 * @txq: tx queue of the head of the category list
 * @max_frames: send limit of the txq category
 * @tx_limit_flag: set to true if the tx limit is reached
 *
 * Return: send limit
 */
u_int16_t
ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
			     u_int16_t max_frames,
			     u_int16_t *tx_limit_flag);

/**
 * ol_tx_bad_peer_update_tx_limit() - update the send limit of the
 *				      tx queue category
 * @pdev: the physical device object
 * @txq: tx queue of the head of the category list
 * @frames: number of frames that have been dequeued
 * @tx_limit_flag: tx limit reached flag
 *
 * Return: None
 */
void
ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
			       struct ol_tx_frms_queue_t *txq,
			       u_int16_t frames,
			       u_int16_t tx_limit_flag);
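
/*
 * Illustrative sketch (hypothetical scheduler step): clamp the dequeue
 * count for a rate-limited peer, then report how many frames were
 * actually dequeued so the category accounting stays consistent.
 * max_frames and the dequeue step are caller-side placeholders.
 *
 *	u_int16_t tx_limit_flag = 0;
 *	u_int16_t limit, sent;
 *
 *	limit = ol_tx_bad_peer_dequeue_check(txq, max_frames,
 *					     &tx_limit_flag);
 *	    ... dequeue at most "limit" frames, yielding "sent" ...
 *	ol_tx_bad_peer_update_tx_limit(pdev, txq, sent, tx_limit_flag);
 */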

/**
 * ol_txrx_set_txq_peer() - set the tx queue's peer pointer
 * @txq: tx queue for a given tid
 * @peer: the peer device object
 *
 * Return: None
 */
void
ol_txrx_set_txq_peer(
	struct ol_tx_frms_queue_t *txq,
	struct ol_txrx_peer_t *peer);

/**
 * @brief - initialize the peer balance context
 * @param pdev - the physical device object, which stores the txqs
 */
void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev);

/**
 * @brief - deinitialize the peer balance context
 * @param pdev - the physical device object, which stores the txqs
 */
void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev);

#else

static inline void ol_txrx_peer_bal_add_limit_peer(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t peer_id,
		u_int16_t peer_limit)
{
}

static inline void ol_txrx_peer_bal_remove_limit_peer(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t peer_id)
{
}

static inline void ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
{
}

static inline void ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
{
}

static inline u_int16_t
ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
			     u_int16_t max_frames,
			     u_int16_t *tx_limit_flag)
{
	/* just return max_frames */
	return max_frames;
}

static inline void
ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
			       struct ol_tx_frms_queue_t *txq,
			       u_int16_t frames,
			       u_int16_t tx_limit_flag)
{
}

static inline void
ol_txrx_set_txq_peer(
		struct ol_tx_frms_queue_t *txq,
		struct ol_txrx_peer_t *peer)
{
}

static inline void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
{
}

static inline void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
{
}

#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */

#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)

/**
 * ol_tx_queue_log_sched() - start logging of tx queues for HL
 * @pdev: physical device object
 * @credit: number of credits
 * @num_active_tids: number of active tids for which logging needs to be done
 * @active_bitmap: bitmap of the active tids
 * @data: buffer
 *
 * Return: None
 */
void
ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
		      int credit,
		      int *num_active_tids,
		      uint32_t **active_bitmap, uint8_t **data);
#else

static inline void
ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
		      int credit,
		      int *num_active_tids,
		      uint32_t **active_bitmap, uint8_t **data)
{
}
#endif /* defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING) */

#if defined(CONFIG_HL_SUPPORT) && TXRX_DEBUG_LEVEL > 5
/**
 * @brief - show current state of all tx queues
 * @param pdev - the physical device object, which stores the txqs
 */
void
ol_tx_queues_display(struct ol_txrx_pdev_t *pdev);

#else

static inline void
ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
{
}
#endif

#define ol_tx_queue_decs_reinit(peer, peer_id)  /* no-op */

#ifdef QCA_SUPPORT_TX_THROTTLE
/**
 * ol_tx_throttle_set_level() - set the current tx throttle level
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device object
 * @level: throttle level to set
 *
 * Return: None
 */
void ol_tx_throttle_set_level(struct cdp_soc_t *soc_hdl,
			      uint8_t pdev_id, int level);

/**
 * ol_tx_throttle_init_period() - set the throttle period and the tx
 *				  duty cycle for each throttle level
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device object
 * @period: throttle period
 * @dutycycle_level: array of duty cycle levels
 *
 * Return: None
 */
void ol_tx_throttle_init_period(struct cdp_soc_t *soc_hdl,
				uint8_t pdev_id, int period,
				uint8_t *dutycycle_level);

/**
 * @brief - initialize the throttle context
 * @param pdev - the physical device object, which stores the txqs
 */
void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev);
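
/*
 * Illustrative sketch (hypothetical thermal-mitigation hook): program the
 * per-level duty cycles once, then select a throttle level as conditions
 * change.  The four-entry table, the period value of 100 and level 1 are
 * made-up values for illustration only.
 *
 *	uint8_t duty[4] = {0, 50, 75, 94};
 *
 *	ol_tx_throttle_init_period(soc, pdev_id, 100, duty);
 *	ol_tx_throttle_set_level(soc, pdev_id, 1);
 */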
#else
static inline void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev) {}

static inline void ol_tx_throttle_set_level(struct cdp_soc_t *soc_hdl,
					    uint8_t pdev_id, int level)
{}

static inline void
ol_tx_throttle_init_period(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			   int period, uint8_t *dutycycle_level)
{}
#endif

#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL

/**
 * ol_tx_if_iterate_next_txq() - check whether a txq list iteration
 *				 has wrapped around to its starting point
 * @first: the tx queue the iteration started from
 * @txq: the current tx queue
 *
 * Return: true while @txq has not returned to @first
 */
static inline bool
ol_tx_if_iterate_next_txq(struct ol_tx_frms_queue_t *first,
			  struct ol_tx_frms_queue_t *txq)
{
	return (first != txq);
}

/**
 * ol_tx_txq_group_credit_limit() - check for credit limit of a given tx queue
 * @pdev: physical device object
 * @txq: tx queue for which the credit limit needs to be checked
 * @credit: number of credits of the selected category
 *
 * Return: updated credits
 */
u_int32_t ol_tx_txq_group_credit_limit(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		u_int32_t credit);

/**
 * ol_tx_txq_group_credit_update() - update group credits of the
 *				     selected category
 * @pdev: physical device object
 * @txq: tx queue for which credit needs to be updated
 * @credit: number of credits by which the selected category needs to be
 *	    updated
 * @absolute: when set, treat @credit as an absolute value rather than
 *	      a delta
 *
 * Return: None
 */
void ol_tx_txq_group_credit_update(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		int32_t credit,
		u_int8_t absolute);
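
/*
 * Illustrative sketch (hypothetical scheduler step): cap the credit
 * offered to a txq by its group's remaining credit, then charge the
 * group for what was actually consumed.  avail_credit and used_credit
 * are caller-tracked placeholders, and passing a negative delta for
 * consumption is an assumption for illustration.
 *
 *	u_int32_t credit;
 *
 *	credit = ol_tx_txq_group_credit_limit(pdev, txq, avail_credit);
 *	    ... download frames, consuming used_credit <= credit ...
 *	ol_tx_txq_group_credit_update(pdev, txq, -(int32_t)used_credit, 0);
 */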

/**
 * ol_tx_set_vdev_group_ptr() - update vdev queues group pointer
 * @pdev: physical device object
 * @vdev_id: vdev id for which the group pointer needs to be updated
 * @grp_ptr: pointer to the ol tx queue group which needs to be set for
 *	     the vdev queues
 *
 * Return: None
 */
void
ol_tx_set_vdev_group_ptr(
		ol_txrx_pdev_handle pdev,
		u_int8_t vdev_id,
		struct ol_tx_queue_group_t *grp_ptr);

/**
 * ol_tx_txq_set_group_ptr() - update tx queue group pointer
 * @txq: tx queue whose group pointer needs to be updated
 * @grp_ptr: pointer to the ol tx queue group which needs to be set for
 *	     the given tx queue
 *
 * Return: None
 */
void
ol_tx_txq_set_group_ptr(
		struct ol_tx_frms_queue_t *txq,
		struct ol_tx_queue_group_t *grp_ptr);

/**
 * ol_tx_set_peer_group_ptr() - update peer tx queues group pointer
 *				for a given tid
 * @pdev: physical device object
 * @peer: peer device object
 * @vdev_id: vdev id
 * @tid: tid for which the group pointer needs to be updated
 *
 * Return: None
 */
void
ol_tx_set_peer_group_ptr(
		ol_txrx_pdev_handle pdev,
		struct ol_txrx_peer_t *peer,
		u_int8_t vdev_id,
		u_int8_t tid);
#else

static inline bool
ol_tx_if_iterate_next_txq(struct ol_tx_frms_queue_t *first,
			  struct ol_tx_frms_queue_t *txq)
{
	return false;
}

static inline
u_int32_t ol_tx_txq_group_credit_limit(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		u_int32_t credit)
{
	return credit;
}

static inline void ol_tx_txq_group_credit_update(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_frms_queue_t *txq,
		int32_t credit,
		u_int8_t absolute)
{
}

static inline void
ol_tx_txq_set_group_ptr(
		struct ol_tx_frms_queue_t *txq,
		struct ol_tx_queue_group_t *grp_ptr)
{
}

static inline void
ol_tx_set_peer_group_ptr(
		ol_txrx_pdev_handle pdev,
		struct ol_txrx_peer_t *peer,
		u_int8_t vdev_id,
		u_int8_t tid)
{
}
#endif

#if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
/**
 * @brief Update group frame count
 * @details This function is used to maintain the count of frames
 * enqueued in a particular group.
 *
 * @param txq - the txq to/from which the frame is being added/removed
 * @param num_frms - number of frames to be added to/removed from the group
 */
void ol_tx_update_grp_frm_count(struct ol_tx_frms_queue_t *txq, int num_frms);

/**
 * ol_tx_txq_update_borrowed_group_credits() - update the borrowed-credit
 *					       accounting for the txq's group
 * @pdev: physical device object
 * @txq: tx queue whose group consumed the credits
 * @credits_used: number of credits consumed
 *
 * Return: number of credits charged to the group after accounting for
 *	   borrowed credits
 */
u32 ol_tx_txq_update_borrowed_group_credits(struct ol_txrx_pdev_t *pdev,
					    struct ol_tx_frms_queue_t *txq,
					    u32 credits_used);
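
/*
 * Illustrative sketch: per the description above, callers add frames to
 * the group count on enqueue and subtract them on dequeue, e.g.
 *
 *	ol_tx_update_grp_frm_count(txq, 1);
 *	    ... later, after dequeuing n frames ...
 *	ol_tx_update_grp_frm_count(txq, -n);
 */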
#else
static inline void ol_tx_update_grp_frm_count(struct ol_tx_frms_queue_t *txq,
					      int num_frms)
{}

static inline u32
ol_tx_txq_update_borrowed_group_credits(struct ol_txrx_pdev_t *pdev,
					struct ol_tx_frms_queue_t *txq,
					u32 credits_used)
{
	return credits_used;
}
#endif /*
	* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL &&
	* FEATURE_HL_DBS_GROUP_CREDIT_SHARING
	*/

#endif /* _OL_TX_QUEUE__H_ */