/*
 * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_nbuf.h>         /* qdf_nbuf_t, etc. */
#include <htt.h>              /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>    /* htt_tx_desc_tid */
#include <ol_txrx_api.h>      /* ol_txrx_vdev_handle */
#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
#include <ol_txrx_types.h>    /* pdev stats, etc. */
#include <ol_tx_desc.h>       /* ol_tx_desc */
#include <ol_tx_send.h>       /* ol_tx_send */
#include <ol_tx_sched.h>      /* OL_TX_SCHED, etc. */
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <qdf_types.h>
#include <qdf_mem.h>          /* qdf_os_mem_alloc_consistent et al */
#include <cdp_txrx_handle.h>
#if defined(CONFIG_HL_SUPPORT)

#if defined(DEBUG_HL_LOGGING)
static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev);

#else
static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
{
}
#endif /* defined(DEBUG_HL_LOGGING) */

#if DEBUG_HTT_CREDIT
#define OL_TX_DISPATCH_LOG_CREDIT()                                          \
	do {                                                                 \
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,     \
			  "TX %d bytes\n", qdf_nbuf_len(msdu));              \
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,     \
			  " <HTT> Decrease credit %d - 1 = %d, len:%d.\n",   \
			  qdf_atomic_read(&pdev->target_tx_credit),          \
			  qdf_atomic_read(&pdev->target_tx_credit) - 1,      \
			  qdf_nbuf_len(msdu));                               \
	} while (0)
#else
#define OL_TX_DISPATCH_LOG_CREDIT()
#endif

/*--- generic definitions used by the scheduler framework for all algs ---*/

struct ol_tx_sched_ctx {
	ol_tx_desc_list head;	/* tx frames accumulated for download */
	int frms;		/* number of frames on the list */
};

typedef TAILQ_HEAD(ol_tx_frms_queue_list_s, ol_tx_frms_queue_t)
	ol_tx_frms_queue_list;

	/*--- scheduler algorithm selection ---*/

	/*--- scheduler options -----------------------------------------------
	 * 1. Round-robin scheduler:
	 *    Select the TID that is at the head of the list of active TIDs.
	 *    Select the head tx queue for this TID.
	 *    Move the tx queue to the back of the list of tx queues for
	 *    this TID.
	 *    Move the TID to the back of the list of active TIDs.
	 *    Send as many frames from the tx queue as credit allows.
	 * 2. Weighted-round-robin advanced scheduler:
	 *    Keep an ordered list of which TID gets selected next.
	 *    Use a weighted-round-robin scheme to determine when to promote
	 *    a TID within this list.
	 *    If a TID at the head of the list is inactive, leave it at the
	 *    head, but check the next TIDs.
	 *    If the credit available is less than the credit threshold for the
	 *    next active TID, don't send anything, and leave the TID at the
	 *    head of the list.
	 *    After a TID is selected, move it to the back of the list.
	 *    Select the head tx queue for this TID.
	 *    Move the tx queue to the back of the list of tx queues for this
	 *    TID.
	 *    Send no more frames than the limit specified for the TID.
	 */
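	/*
	 * Illustrative example of the WRR-adv selection (not part of the
	 * driver): with an order list of {VO, VI, BE} where VO is inactive
	 * and VI has wrr_skip_weight 3, the scheduler leaves VO at the
	 * head, increments VI's wrr_count on each pass, and only dispatches
	 * from VI once the counter reaches the skip weight; VI is then
	 * rotated to the back of the list and its counter is reset.  If the
	 * available credit is below VI's credit_threshold, nothing is sent
	 * and VI keeps its place at the head.
	 */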
#define OL_TX_SCHED_RR  1
#define OL_TX_SCHED_WRR_ADV 2

#ifndef OL_TX_SCHED
	/*#define OL_TX_SCHED OL_TX_SCHED_RR*/
#define OL_TX_SCHED OL_TX_SCHED_WRR_ADV /* default */
#endif


#if OL_TX_SCHED == OL_TX_SCHED_RR

#define ol_tx_sched_rr_t ol_tx_sched_t

#define OL_TX_SCHED_NUM_CATEGORIES (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)

#define ol_tx_sched_init                ol_tx_sched_init_rr
#define ol_tx_sched_select_init(pdev)   /* no-op */
#define ol_tx_sched_select_batch        ol_tx_sched_select_batch_rr
#define ol_tx_sched_txq_enqueue         ol_tx_sched_txq_enqueue_rr
#define ol_tx_sched_txq_deactivate      ol_tx_sched_txq_deactivate_rr
#define ol_tx_sched_category_tx_queues  ol_tx_sched_category_tx_queues_rr
#define ol_tx_sched_txq_discard         ol_tx_sched_txq_discard_rr
#define ol_tx_sched_category_info       ol_tx_sched_category_info_rr
#define ol_tx_sched_discard_select_category \
		ol_tx_sched_discard_select_category_rr

#elif OL_TX_SCHED == OL_TX_SCHED_WRR_ADV

#define ol_tx_sched_wrr_adv_t ol_tx_sched_t

#define OL_TX_SCHED_NUM_CATEGORIES OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES

#define ol_tx_sched_init                ol_tx_sched_init_wrr_adv
#define ol_tx_sched_select_init(pdev) \
		do { \
			qdf_spin_lock_bh(&pdev->tx_queue_spinlock); \
			ol_tx_sched_select_init_wrr_adv(pdev); \
			qdf_spin_unlock_bh(&pdev->tx_queue_spinlock); \
		} while (0)
#define ol_tx_sched_select_batch        ol_tx_sched_select_batch_wrr_adv
#define ol_tx_sched_txq_enqueue         ol_tx_sched_txq_enqueue_wrr_adv
#define ol_tx_sched_txq_deactivate      ol_tx_sched_txq_deactivate_wrr_adv
#define ol_tx_sched_category_tx_queues  ol_tx_sched_category_tx_queues_wrr_adv
#define ol_tx_sched_txq_discard         ol_tx_sched_txq_discard_wrr_adv
#define ol_tx_sched_category_info       ol_tx_sched_category_info_wrr_adv
#define ol_tx_sched_discard_select_category \
		ol_tx_sched_discard_select_category_wrr_adv

#else

#error Unknown OL TX SCHED specification

#endif /* OL_TX_SCHED */

	/*--- round-robin scheduler ----------------------------------------*/
#if OL_TX_SCHED == OL_TX_SCHED_RR

	/*--- definitions ---*/

	struct ol_tx_active_queues_in_tid_t {
		/* list_elem links this TID into the active-TIDs list */
		TAILQ_ENTRY(ol_tx_active_queues_in_tid_t) list_elem;
		u_int32_t frms;
		u_int32_t bytes;
		ol_tx_frms_queue_list head;
		bool    active;
		int tid;
	};

	struct ol_tx_sched_rr_t {
		struct ol_tx_active_queues_in_tid_t
			tx_active_queues_in_tid_array[OL_TX_NUM_TIDS
						+ OL_TX_VDEV_NUM_QUEUES];
	TAILQ_HEAD(ol_tx_active_tids_s, ol_tx_active_queues_in_tid_t)
							tx_active_tids_list;
		u_int8_t discard_weights[OL_TX_NUM_TIDS
					+ OL_TX_VDEV_NUM_QUEUES];
	};

#define TX_SCH_MAX_CREDIT_FOR_THIS_TID(tidq) 16

/*--- functions ---*/

/*
 * The scheduler sync spinlock has been acquired outside this function,
 * so there is no need to worry about mutex within this function.
 */
static int
ol_tx_sched_select_batch_rr(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_sched_ctx *sctx,
	u_int32_t credit)
{
	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_active_queues_in_tid_t *txq_queue;
	struct ol_tx_frms_queue_t *next_tq;
	u_int16_t frames, used_credits = 0, tx_limit, tx_limit_flag = 0;
	int bytes;

	TX_SCHED_DEBUG_PRINT("Enter");

	if (TAILQ_EMPTY(&scheduler->tx_active_tids_list))
		return used_credits;

	txq_queue = TAILQ_FIRST(&scheduler->tx_active_tids_list);

	TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue, list_elem);
	txq_queue->active = false;

	next_tq = TAILQ_FIRST(&txq_queue->head);
	TAILQ_REMOVE(&txq_queue->head, next_tq, list_elem);

	credit = QDF_MIN(credit, TX_SCH_MAX_CREDIT_FOR_THIS_TID(next_tq));
	frames = next_tq->frms; /* download as many frames as credit allows */
	tx_limit = ol_tx_bad_peer_dequeue_check(next_tq,
						frames,
						&tx_limit_flag);
	frames = ol_tx_dequeue(
			pdev, next_tq, &sctx->head, tx_limit, &credit, &bytes);
	ol_tx_bad_peer_update_tx_limit(pdev, next_tq, frames, tx_limit_flag);

	used_credits = credit;
	txq_queue->frms -= frames;
	txq_queue->bytes -= bytes;

	if (next_tq->frms > 0) {
		TAILQ_INSERT_TAIL(&txq_queue->head, next_tq, list_elem);
		TAILQ_INSERT_TAIL(&scheduler->tx_active_tids_list,
				  txq_queue, list_elem);
		txq_queue->active = true;
	} else if (!TAILQ_EMPTY(&txq_queue->head)) {
		/*
		 * This tx queue is empty, but there's another tx queue for
		 * the same TID that is not empty.
		 * Thus, the TID as a whole is active.
		 */
		TAILQ_INSERT_TAIL(&scheduler->tx_active_tids_list,
				  txq_queue, list_elem);
		txq_queue->active = true;
	}
	sctx->frms += frames;

	TX_SCHED_DEBUG_PRINT("Leave");
	return used_credits;
}
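
/*
 * Worked example for the batch selection above (illustrative only):
 * with credit = 20 and TX_SCH_MAX_CREDIT_FOR_THIS_TID() = 16, the
 * credit offered to ol_tx_dequeue() is min(20, 16) = 16.
 * ol_tx_dequeue() appends the downloaded frames to sctx->head and,
 * judging from the used_credits assignment above, reports the credit
 * it actually consumed back through &credit, which is what this
 * function returns as used_credits.
 */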

static inline void
ol_tx_sched_txq_enqueue_rr(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	int tid,
	int frms,
	int bytes)
{
	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_active_queues_in_tid_t *txq_queue;

	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
	if (txq->flag != ol_tx_queue_active)
		TAILQ_INSERT_TAIL(&txq_queue->head, txq, list_elem);

	txq_queue->frms += frms;
	txq_queue->bytes += bytes;

	if (!txq_queue->active) {
		TAILQ_INSERT_TAIL(&scheduler->tx_active_tids_list,
				  txq_queue, list_elem);
		txq_queue->active = true;
	}
}

static inline void
ol_tx_sched_txq_deactivate_rr(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	int tid)
{
	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_active_queues_in_tid_t *txq_queue;

	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
	txq_queue->frms -= txq->frms;
	txq_queue->bytes -= txq->bytes;

	TAILQ_REMOVE(&txq_queue->head, txq, list_elem);
	/*if (txq_queue->frms == 0 && txq_queue->active) {*/
	if (TAILQ_EMPTY(&txq_queue->head) && txq_queue->active) {
		TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
			     list_elem);
		txq_queue->active = false;
	}
}

ol_tx_frms_queue_list *
ol_tx_sched_category_tx_queues_rr(struct ol_txrx_pdev_t *pdev, int tid)
{
	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_active_queues_in_tid_t *txq_queue;

	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];
	return &txq_queue->head;
}

int
ol_tx_sched_discard_select_category_rr(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_sched_rr_t *scheduler;
	u_int8_t i, tid = 0;
	int max_score = 0;

	scheduler = pdev->tx_sched.scheduler;
	/*
	 * Choose which TID's tx frames to drop next based on two factors:
	 * 1.  Which TID has the most tx frames present
	 * 2.  The TID's priority (high-priority TIDs have a low discard_weight)
	 */
	for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
		int score;

		score =
			scheduler->tx_active_queues_in_tid_array[i].frms *
			scheduler->discard_weights[i];
		if (max_score == 0 || score > max_score) {
			max_score = score;
			tid = i;
		}
	}
	return tid;
}
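
/*
 * Example of the scoring above (illustrative only): if a voice TID
 * holds 100 frames (discard weight 1) and a best-effort TID holds
 * 20 frames (discard weight 8), their scores are 100 and 160
 * respectively, so the best-effort TID is selected for discard even
 * though it holds fewer frames.
 */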

void
ol_tx_sched_txq_discard_rr(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	int tid, int frames, int bytes)
{
	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_active_queues_in_tid_t *txq_queue;

	txq_queue = &scheduler->tx_active_queues_in_tid_array[tid];

	if (0 == txq->frms)
		TAILQ_REMOVE(&txq_queue->head, txq, list_elem);

	txq_queue->frms -= frames;
	txq_queue->bytes -= bytes;
	if (txq_queue->active == true && txq_queue->frms == 0) {
		TAILQ_REMOVE(&scheduler->tx_active_tids_list, txq_queue,
			     list_elem);
		txq_queue->active = false;
	}
}

void
ol_tx_sched_category_info_rr(
	struct ol_txrx_pdev_t *pdev,
	int cat, int *active,
	int *frms, int *bytes)
{
	struct ol_tx_sched_rr_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_active_queues_in_tid_t *txq_queue;

	txq_queue = &scheduler->tx_active_queues_in_tid_array[cat];

	*active = txq_queue->active;
	*frms = txq_queue->frms;
	*bytes = txq_queue->bytes;
}

enum {
	ol_tx_sched_discard_weight_voice = 1,
	ol_tx_sched_discard_weight_video = 4,
	ol_tx_sched_discard_weight_ucast_default = 8,
	ol_tx_sched_discard_weight_mgmt_non_qos = 1, /* 0? */
	ol_tx_sched_discard_weight_mcast = 1, /* 0? also for probe & assoc */
};

void *
ol_tx_sched_init_rr(
	struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_sched_rr_t *scheduler;
	int i;

	scheduler = qdf_mem_malloc(sizeof(struct ol_tx_sched_rr_t));
	if (!scheduler)
		return scheduler;

	for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
		scheduler->tx_active_queues_in_tid_array[i].tid = i;
		TAILQ_INIT(&scheduler->tx_active_queues_in_tid_array[i].head);
		scheduler->tx_active_queues_in_tid_array[i].active = 0;
		scheduler->tx_active_queues_in_tid_array[i].frms = 0;
		scheduler->tx_active_queues_in_tid_array[i].bytes = 0;
	}
	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
		scheduler->tx_active_queues_in_tid_array[i].tid = i;
		if (i < OL_TX_NON_QOS_TID) {
			int ac = TXRX_TID_TO_WMM_AC(i);

			switch (ac) {
			case TXRX_WMM_AC_VO:
				scheduler->discard_weights[i] =
					ol_tx_sched_discard_weight_voice;
				break;
			case TXRX_WMM_AC_VI:
				scheduler->discard_weights[i] =
					ol_tx_sched_discard_weight_video;
				break;
			default:
				scheduler->discard_weights[i] =
				ol_tx_sched_discard_weight_ucast_default;
				break;
			}
		} else {
			scheduler->discard_weights[i] =
				ol_tx_sched_discard_weight_mgmt_non_qos;
		}
	}
	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		int j = i + OL_TX_NUM_TIDS;

		scheduler->tx_active_queues_in_tid_array[j].tid =
							OL_TX_NUM_TIDS - 1;
		scheduler->discard_weights[j] =
					ol_tx_sched_discard_weight_mcast;
	}
	TAILQ_INIT(&scheduler->tx_active_tids_list);

	return scheduler;
}

void
ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		      struct ol_tx_wmm_param_t wmm_param)
{
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "Dummy function when OL_TX_SCHED_RR is enabled\n");
}

/**
 * ol_tx_sched_stats_display() - tx sched stats display
 * @pdev: Pointer to the PDEV structure.
 *
 * Return: none.
 */
void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
{
}

/**
 * ol_tx_sched_cur_state_display() - tx sched cur state display
 * @pdev: Pointer to the PDEV structure.
 *
 * Return: none.
 */
void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
{
}

/**
 * ol_tx_sched_stats_clear() - reset tx sched stats
 * @pdev: Pointer to the PDEV structure.
 *
 * Return: none.
 */
void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
{
}

#endif /* OL_TX_SCHED == OL_TX_SCHED_RR */

/*--- advanced scheduler ----------------------------------------------------*/
#if OL_TX_SCHED == OL_TX_SCHED_WRR_ADV

/*--- definitions ---*/

struct ol_tx_sched_wrr_adv_category_info_t {
	struct {
		int wrr_skip_weight;	/* passes to skip before servicing */
		u_int32_t credit_threshold; /* min credit needed to dispatch */
		u_int16_t send_limit;	/* max frames to send per turn */
		int credit_reserve;	/* credit withheld from each dispatch */
		int discard_weight;	/* higher weight = discarded sooner */
	} specs;
	struct {
		int wrr_count;		/* passes accumulated since last turn */
		int frms;		/* frames currently queued */
		int bytes;		/* bytes currently queued */
		ol_tx_frms_queue_list head; /* tx queues in this category */
		bool active;		/* category has queued frames */
	} state;
#ifdef DEBUG_HL_LOGGING
	struct {
		char *cat_name;
		unsigned int queued;
		unsigned int dispatched;
		unsigned int discard;
	} stat;
#endif
};

#define OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(cat, \
		wrr_skip_weight, \
		credit_threshold, \
		send_limit, \
		credit_reserve, \
		discard_weights) \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _WRR_SKIP_WEIGHT = \
			(wrr_skip_weight) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_THRESHOLD = \
			(credit_threshold) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _SEND_LIMIT = \
			(send_limit) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_RESERVE = \
			(credit_reserve) }; \
		enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _DISCARD_WEIGHT = \
			(discard_weights) };
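/*
 * For reference (illustrative expansion, not part of the driver):
 * OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO, 1, 16, 24, 0, 1) generates
 *
 *	enum { OL_TX_SCHED_WRR_ADV_VO_WRR_SKIP_WEIGHT = (1) };
 *	enum { OL_TX_SCHED_WRR_ADV_VO_CREDIT_THRESHOLD = (16) };
 *	enum { OL_TX_SCHED_WRR_ADV_VO_SEND_LIMIT = (24) };
 *	enum { OL_TX_SCHED_WRR_ADV_VO_CREDIT_RESERVE = (0) };
 *	enum { OL_TX_SCHED_WRR_ADV_VO_DISCARD_WEIGHT = (1) };
 *
 * i.e. the per-category tuning values become compile-time constants
 * that OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE() below copies into the
 * scheduler's runtime specs.
 */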
/* Rome:
 * For high-volume traffic flows (VI, BE, BK), use a credit threshold
 * roughly equal to a large A-MPDU (occupying half the target memory
 * available for holding tx frames) to download AMPDU-sized batches
 * of traffic.
 * For high-priority, low-volume traffic flows (VO and mgmt), use no
 * credit threshold, to minimize download latency.
 */
/*                                            WRR           send
 *                                           skip  credit  limit credit disc
 *                                            wts  thresh (frms) reserv  wts
 */
#ifdef HIF_SDIO
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO,           1,     17,    24,     0,  1);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI,           3,     17,    16,     1,  4);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE,          10,     17,    16,     1,  8);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK,          12,      6,     6,     1,  8);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA,10,     17,    16,     1,  8);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT,   1,      1,     4,     0,  1);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA,  10,     17,     4,     1,  4);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT,   1,      1,     4,     0,  1);
#else
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VO,           1,     16,    24,     0,  1);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(VI,           3,     16,    16,     1,  4);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BE,          10,     12,    12,     1,  8);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(BK,          12,      6,     6,     1,  8);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(NON_QOS_DATA, 12,     6,     4,     1,  8);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(UCAST_MGMT,   1,      1,     4,     0,  1);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_DATA,  10,     16,     4,     1,  4);
OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT,   1,      1,     4,     0,  1);
#endif

#ifdef DEBUG_HL_LOGGING

#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)              \
	do {                                                                 \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.queued = 0;                                            \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.discard = 0;                                           \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.dispatched = 0;                                        \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category]  \
		.stat.cat_name = #category;                                  \
	} while (0)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)             \
	category->stat.queued += frms;
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)            \
	category->stat.discard += frms;
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)         \
	category->stat.dispatched += frms;
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)                        \
	ol_tx_sched_wrr_adv_cat_stat_dump(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)                   \
	ol_tx_sched_wrr_adv_cat_cur_state_dump(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)                       \
	ol_tx_sched_wrr_adv_cat_stat_clear(scheduler)

#else   /* DEBUG_HL_LOGGING */

#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler)
#define OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(scheduler)

#endif  /* DEBUG_HL_LOGGING */

#define OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(category, scheduler) \
	do { \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.wrr_skip_weight = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _WRR_SKIP_WEIGHT; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.credit_threshold = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_THRESHOLD; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.send_limit = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _SEND_LIMIT; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.credit_reserve = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _CREDIT_RESERVE; \
		scheduler->categories[OL_TX_SCHED_WRR_ADV_CAT_ ## category] \
		.specs.discard_weight = \
		OL_TX_SCHED_WRR_ADV_ ## category ## _DISCARD_WEIGHT; \
		OL_TX_SCHED_WRR_ADV_CAT_STAT_INIT(category, scheduler); \
	} while (0)
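
/*
 * Typical usage (sketch, inferred from the macro definitions above):
 * the scheduler init invokes the store macro once per category, e.g.
 *
 *	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, scheduler);
 *	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, scheduler);
 *
 * copying the compile-time constants generated by
 * OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC() into scheduler->categories[]
 * and initializing the per-category debug counters.
 */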

struct ol_tx_sched_wrr_adv_t {
	int order[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
	int index;
	struct ol_tx_sched_wrr_adv_category_info_t
		categories[OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES];
};

#define OL_TX_AIFS_DEFAULT_VO   2
#define OL_TX_AIFS_DEFAULT_VI   2
#define OL_TX_AIFS_DEFAULT_BE   3
#define OL_TX_AIFS_DEFAULT_BK   7
#define OL_TX_CW_MIN_DEFAULT_VO   3
#define OL_TX_CW_MIN_DEFAULT_VI   7
#define OL_TX_CW_MIN_DEFAULT_BE   15
#define OL_TX_CW_MIN_DEFAULT_BK   15

/*--- functions ---*/

#ifdef DEBUG_HL_LOGGING
static void ol_tx_sched_wrr_adv_cat_stat_dump(
	struct ol_tx_sched_wrr_adv_t *scheduler)
{
	int i;

	txrx_nofl_info("Scheduler Stats:");
	txrx_nofl_info("====category(CRR,CRT,WSW): Queued  Discard  Dequeued  frms  wrr===");
	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
		txrx_nofl_info("%12s(%2d, %2d, %2d):  %6d  %7d  %8d  %4d  %3d",
			       scheduler->categories[i].stat.cat_name,
			       scheduler->categories[i].specs.credit_reserve,
			       scheduler->categories[i].specs.credit_threshold,
			       scheduler->categories[i].specs.wrr_skip_weight,
			       scheduler->categories[i].stat.queued,
			       scheduler->categories[i].stat.discard,
			       scheduler->categories[i].stat.dispatched,
			       scheduler->categories[i].state.frms,
			       scheduler->categories[i].state.wrr_count);
	}
}

static void ol_tx_sched_wrr_adv_cat_cur_state_dump(
	struct ol_tx_sched_wrr_adv_t *scheduler)
{
	int i;

	txrx_nofl_info("Scheduler State Snapshot:");
	txrx_nofl_info("====category(CRR,CRT,WSW): IS_Active  Pend_Frames  Pend_bytes  wrr===");
	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
		txrx_nofl_info("%12s(%2d, %2d, %2d):  %9d  %11d  %10d  %3d",
			       scheduler->categories[i].stat.cat_name,
			       scheduler->categories[i].specs.credit_reserve,
			       scheduler->categories[i].specs.credit_threshold,
			       scheduler->categories[i].specs.wrr_skip_weight,
			       scheduler->categories[i].state.active,
			       scheduler->categories[i].state.frms,
			       scheduler->categories[i].state.bytes,
			       scheduler->categories[i].state.wrr_count);
	}
}

static void ol_tx_sched_wrr_adv_cat_stat_clear(
	struct ol_tx_sched_wrr_adv_t *scheduler)
{
	int i;

	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
		scheduler->categories[i].stat.queued = 0;
		scheduler->categories[i].stat.discard = 0;
		scheduler->categories[i].stat.dispatched = 0;
	}
}

#endif

static void
ol_tx_sched_select_init_wrr_adv(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	/* start selection from the front of the ordered list */
	scheduler->index = 0;
}

static void
ol_tx_sched_wrr_adv_rotate_order_list_tail(
		struct ol_tx_sched_wrr_adv_t *scheduler, int idx)
{
	int value;
	/* remember the value of the specified element */
	value = scheduler->order[idx];
	/* shift all further elements up one space */
	for (; idx < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES - 1; idx++)
		scheduler->order[idx] = scheduler->order[idx + 1];

	/* put the specified element at the end */
	scheduler->order[idx] = value;
}
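
/*
 * Example (illustrative only): with order = {A, B, C, D} and idx = 1,
 * the loop shifts C and D up one slot and the saved value B is placed
 * at the end, yielding order = {A, C, D, B}.
 */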

static void
ol_tx_sched_wrr_adv_credit_sanity_check(struct ol_txrx_pdev_t *pdev,
					u_int32_t credit)
{
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	int i;
	int okay = 1;

	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
		if (scheduler->categories[i].specs.credit_threshold > credit) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "*** Config error: credit (%d) not enough to support category %d threshold (%d)\n",
				  credit, i,
				  scheduler->categories[i].specs.credit_threshold);
			okay = 0;
		}
	}
	qdf_assert(okay);
}

/*
 * The scheduler sync spinlock has been acquired outside this function,
 * so there is no need to worry about mutex within this function.
 */
static int
ol_tx_sched_select_batch_wrr_adv(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_sched_ctx *sctx,
	u_int32_t credit)
{
	static int first = 1;
	int category_index = 0;
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_frms_queue_t *txq, *first_txq = NULL;
	int index;
	struct ol_tx_sched_wrr_adv_category_info_t *category = NULL;
	int frames, bytes, used_credits = 0, tx_limit;
	u_int16_t tx_limit_flag;
	u32 credit_rem = credit;

	/*
	 * Just for good measure, do a sanity check that the initial credit
	 * is enough to cover every category's credit threshold.
	 */
	if (first) {
		first = 0;
		ol_tx_sched_wrr_adv_credit_sanity_check(pdev, credit);
	}

	/* choose the traffic category from the ordered list */
	index = scheduler->index;
	while (index < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
		category_index = scheduler->order[index];
		category = &scheduler->categories[category_index];
		if (!category->state.active) {
			/* move on to the next category */
			index++;
			continue;
		}
		if (++category->state.wrr_count <
					category->specs.wrr_skip_weight) {
			/* skip this category (move it to the back) */
			ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler,
								   index);
			/*
			 * try again (iterate) on the new element
			 * that was moved up
			 */
			continue;
		}
		/* found the first active category whose WRR turn is present */
		break;
	}
	if (index >= OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES) {
		/* no categories are active */
		return 0;
	}

	/* is there enough credit for the selected category? */
	if (credit < category->specs.credit_threshold) {
		/*
		 * Can't send yet - wait until more credit becomes available.
		 * In the meantime, restore the WRR counter (since we didn't
		 * service this category after all).
		 */
		category->state.wrr_count = category->state.wrr_count - 1;
		return 0;
	}
	/* enough credit is available - go ahead and send some frames */
	/*
	 * This category was serviced - reset the WRR counter, and move this
	 * category to the back of the order list.
	 */
	category->state.wrr_count = 0;
	ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler, index);
	/*
	 * With this category moved to the back, if there's still any credit
	 * left, set up the next invocation of this function to start from
	 * where this one left off, by looking at the category that just got
	 * shifted forward into the position the serviced category was
	 * occupying.
	 */
	scheduler->index = index;

	/*
	 * Take the tx queue from the head of the category list.
	 */
	txq = TAILQ_FIRST(&category->state.head);

	while (txq) {
		TAILQ_REMOVE(&category->state.head, txq, list_elem);
		credit = ol_tx_txq_group_credit_limit(pdev, txq, credit);
		if (credit > category->specs.credit_reserve) {
			credit -= category->specs.credit_reserve;
			tx_limit = ol_tx_bad_peer_dequeue_check(txq,
					category->specs.send_limit,
					&tx_limit_flag);
			frames = ol_tx_dequeue(
					pdev, txq, &sctx->head,
					tx_limit, &credit, &bytes);
			ol_tx_bad_peer_update_tx_limit(pdev, txq,
						       frames,
						       tx_limit_flag);

			OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category,
								    frames);
			/* Update used global credits */
			used_credits = credit;
			credit =
			ol_tx_txq_update_borrowed_group_credits(pdev, txq,
								credit);
			category->state.frms -= frames;
			category->state.bytes -= bytes;
			if (txq->frms > 0) {
				TAILQ_INSERT_TAIL(&category->state.head,
						  txq, list_elem);
			} else {
				if (category->state.frms == 0)
					category->state.active = 0;
			}
			sctx->frms += frames;
			ol_tx_txq_group_credit_update(pdev, txq, -credit, 0);
			break;
		}

		/*
		 * The current txq belongs to a group that does not have
		 * enough credits; iterate on to the next txq and see if
		 * we can download packets from that queue instead.
		 */
		if (ol_tx_if_iterate_next_txq(first_txq, txq)) {
			credit = credit_rem;
			if (!first_txq)
				first_txq = txq;

			TAILQ_INSERT_TAIL(&category->state.head,
					  txq, list_elem);

			txq = TAILQ_FIRST(&category->state.head);
		} else {
			TAILQ_INSERT_HEAD(&category->state.head, txq,
					  list_elem);
			break;
		}
	} /* while (txq) */

	return used_credits;
}
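
/*
 * Worked example for the dispatch above (illustrative only): suppose
 * the BE category is selected with credit = 20, credit_threshold = 12,
 * credit_reserve = 1 and send_limit = 12.  The threshold check passes
 * (20 >= 12), one credit is withheld as the reserve, and up to
 * send_limit frames are dequeued as the remaining credit allows; the
 * credit actually consumed is returned as used_credits, and the
 * queue's group credit is reduced by the same amount.
 */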

static inline void
ol_tx_sched_txq_enqueue_wrr_adv(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	int tid,
	int frms,
	int bytes)
{
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_sched_wrr_adv_category_info_t *category;

	category = &scheduler->categories[pdev->tid_to_ac[tid]];
	category->state.frms += frms;
	category->state.bytes += bytes;
	OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms);
	if (txq->flag != ol_tx_queue_active) {
		TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
		category->state.active = 1; /* may have already been active */
	}
}

static inline void
ol_tx_sched_txq_deactivate_wrr_adv(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	int tid)
{
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_sched_wrr_adv_category_info_t *category;

	category = &scheduler->categories[pdev->tid_to_ac[tid]];
	category->state.frms -= txq->frms;
	category->state.bytes -= txq->bytes;

	TAILQ_REMOVE(&category->state.head, txq, list_elem);

	if (category->state.frms == 0 && category->state.active)
		category->state.active = 0;
}

static ol_tx_frms_queue_list *
ol_tx_sched_category_tx_queues_wrr_adv(struct ol_txrx_pdev_t *pdev, int cat)
{
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_sched_wrr_adv_category_info_t *category;

	category = &scheduler->categories[cat];
	return &category->state.head;
}

static int
ol_tx_sched_discard_select_category_wrr_adv(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_sched_wrr_adv_t *scheduler;
	u_int8_t i, cat = 0;
	int max_score = 0;

	scheduler = pdev->tx_sched.scheduler;
	/*
	 * Choose which category's tx frames to drop next based on two factors:
	 * 1.  Which category has the most tx frames present
	 * 2.  The category's priority (high-priority categories have a low
	 *     discard_weight)
	 */
	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
		int score;

		score =
			scheduler->categories[i].state.frms *
			scheduler->categories[i].specs.discard_weight;
		if (max_score == 0 || score > max_score) {
			max_score = score;
			cat = i;
		}
	}
	return cat;
}

static void
ol_tx_sched_txq_discard_wrr_adv(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	int cat, int frames, int bytes)
{
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_sched_wrr_adv_category_info_t *category;

	category = &scheduler->categories[cat];

	if (0 == txq->frms)
		TAILQ_REMOVE(&category->state.head, txq, list_elem);

	category->state.frms -= frames;
	category->state.bytes -= bytes;
	OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frames);
	if (category->state.frms == 0)
		category->state.active = 0;
}

static void
ol_tx_sched_category_info_wrr_adv(
	struct ol_txrx_pdev_t *pdev,
	int cat, int *active,
	int *frms, int *bytes)
{
	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
	struct ol_tx_sched_wrr_adv_category_info_t *category;

	category = &scheduler->categories[cat];
	*active = category->state.active;
	*frms = category->state.frms;
	*bytes = category->state.bytes;
}

/**
 * ol_tx_sched_wrr_param_update() - update the WRR TX sched params
 * @pdev: Pointer to PDEV structure.
 * @scheduler: Pointer to tx scheduler.
 *
 * Update the WRR TX scheduler parameters for each category if they are
 * specified in the ini file by the user.
 *
 * Return: none
 */
static void ol_tx_sched_wrr_param_update(struct ol_txrx_pdev_t *pdev,
					 struct ol_tx_sched_wrr_adv_t *
					 scheduler)
{
	int i;
	static const char * const tx_sched_wrr_name[4] = {
		"BE",
		"BK",
		"VI",
		"VO"
	};

	if (!scheduler)
		return;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"%s: Tuning the TX scheduler wrr parameters by ini file:",
		__func__);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		"         skip credit limit credit disc");

	for (i = OL_TX_SCHED_WRR_ADV_CAT_BE;
		i <= OL_TX_SCHED_WRR_ADV_CAT_VO; i++) {
		if (ol_cfg_get_wrr_skip_weight(pdev->ctrl_pdev, i)) {
			scheduler->categories[i].specs.wrr_skip_weight =
				ol_cfg_get_wrr_skip_weight(pdev->ctrl_pdev, i);
			scheduler->categories[i].specs.credit_threshold =
				ol_cfg_get_credit_threshold(pdev->ctrl_pdev, i);
			scheduler->categories[i].specs.send_limit =
				ol_cfg_get_send_limit(pdev->ctrl_pdev, i);
			scheduler->categories[i].specs.credit_reserve =
				ol_cfg_get_credit_reserve(pdev->ctrl_pdev, i);
			scheduler->categories[i].specs.discard_weight =
				ol_cfg_get_discard_weight(pdev->ctrl_pdev, i);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				"%s-update: %d,  %d,    %d,   %d,    %d",
				tx_sched_wrr_name[i],
				scheduler->categories[i].specs.wrr_skip_weight,
				scheduler->categories[i].specs.credit_threshold,
				scheduler->categories[i].specs.send_limit,
				scheduler->categories[i].specs.credit_reserve,
				scheduler->categories[i].specs.discard_weight);
		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				"%s-orig: %d,  %d,    %d,   %d,    %d",
				tx_sched_wrr_name[i],
				scheduler->categories[i].specs.wrr_skip_weight,
				scheduler->categories[i].specs.credit_threshold,
				scheduler->categories[i].specs.send_limit,
				scheduler->categories[i].specs.credit_reserve,
				scheduler->categories[i].specs.discard_weight);
		}
	}
}
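
/*
 * Sketch of an ini override consumed via the ol_cfg_get_* calls above.
 * The key name below is hypothetical (the real ini keys live in the hdd
 * cfg layer and may differ); the five fields follow the order logged
 * above:
 *
 *   # wrr_skip_weight, credit_threshold, send_limit, credit_reserve,
 *   # discard_weight
 *   TxSchedWrrVO=1, 17, 24, 0, 1
 *
 * A zero wrr_skip_weight leaves the compiled-in defaults untouched,
 * which is why the update above is keyed entirely on that field.
 */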

static void *
ol_tx_sched_init_wrr_adv(
		struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_sched_wrr_adv_t *scheduler;
	int i;

	scheduler = qdf_mem_malloc(
			sizeof(struct ol_tx_sched_wrr_adv_t));
	if (!scheduler)
		return scheduler;

	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, scheduler);
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, scheduler);
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, scheduler);
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, scheduler);
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(NON_QOS_DATA, scheduler);
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(UCAST_MGMT, scheduler);
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_DATA, scheduler);
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(MCAST_MGMT, scheduler);

	ol_tx_sched_wrr_param_update(pdev, scheduler);

	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
		scheduler->categories[i].state.active = 0;
		scheduler->categories[i].state.frms = 0;
		/*scheduler->categories[i].state.bytes = 0;*/
		TAILQ_INIT(&scheduler->categories[i].state.head);
		/*
		 * init categories to not be skipped before
		 * their initial selection
		 */
		scheduler->categories[i].state.wrr_count =
			scheduler->categories[i].specs.wrr_skip_weight - 1;
	}

	/*
	 * Init the order array - the initial ordering doesn't matter, as the
	 * order array will get reshuffled as data arrives.
	 */
	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++)
		scheduler->order[i] = i;

	return scheduler;
}
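
/*
 * Illustrative note on the wrr_count seeding above (a sketch of the
 * weighted-round-robin mechanic, assuming the selection logic earlier in
 * this file advances wrr_count each pass and selects a category once the
 * count reaches wrr_skip_weight): with wrr_skip_weight = 3, seeding
 * wrr_count = 2 makes the very first scheduling pass bring the count to
 * 3, so the category is eligible immediately instead of sitting out two
 * passes after init.
 */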

/* WMM parameters are supposed to be passed when associating with an AP.
 * Based on AIFS+CWMin, this function maps each queue to one of the four
 * default settings of the scheduler, i.e. VO, VI, BE, or BK.
 */
void
ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		      struct ol_tx_wmm_param_t wmm_param)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle data_pdev =
				ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	struct ol_tx_sched_wrr_adv_t def_cfg;
	struct ol_tx_sched_wrr_adv_t *scheduler =
					data_pdev->tx_sched.scheduler;
	u_int32_t i, ac_selected;
	u_int32_t weight[QCA_WLAN_AC_ALL], default_edca[QCA_WLAN_AC_ALL];

	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VO, (&def_cfg));
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(VI, (&def_cfg));
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BE, (&def_cfg));
	OL_TX_SCHED_WRR_ADV_CAT_CFG_STORE(BK, (&def_cfg));

	/* default_edca = AIFS + CWMin */
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] =
		OL_TX_AIFS_DEFAULT_VO + OL_TX_CW_MIN_DEFAULT_VO;
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] =
		OL_TX_AIFS_DEFAULT_VI + OL_TX_CW_MIN_DEFAULT_VI;
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] =
		OL_TX_AIFS_DEFAULT_BE + OL_TX_CW_MIN_DEFAULT_BE;
	default_edca[OL_TX_SCHED_WRR_ADV_CAT_BK] =
		OL_TX_AIFS_DEFAULT_BK + OL_TX_CW_MIN_DEFAULT_BK;

	weight[OL_TX_SCHED_WRR_ADV_CAT_VO] =
		wmm_param.ac[QCA_WLAN_AC_VO].aifs +
				wmm_param.ac[QCA_WLAN_AC_VO].cwmin;
	weight[OL_TX_SCHED_WRR_ADV_CAT_VI] =
		wmm_param.ac[QCA_WLAN_AC_VI].aifs +
				wmm_param.ac[QCA_WLAN_AC_VI].cwmin;
	weight[OL_TX_SCHED_WRR_ADV_CAT_BK] =
		wmm_param.ac[QCA_WLAN_AC_BK].aifs +
				wmm_param.ac[QCA_WLAN_AC_BK].cwmin;
	weight[OL_TX_SCHED_WRR_ADV_CAT_BE] =
		wmm_param.ac[QCA_WLAN_AC_BE].aifs +
				wmm_param.ac[QCA_WLAN_AC_BE].cwmin;

	for (i = 0; i < QCA_WLAN_AC_ALL; i++) {
		if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VO] >= weight[i])
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VO;
		else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_VI] >= weight[i])
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_VI;
		else if (default_edca[OL_TX_SCHED_WRR_ADV_CAT_BE] >= weight[i])
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BE;
		else
			ac_selected = OL_TX_SCHED_WRR_ADV_CAT_BK;

		scheduler->categories[i].specs.wrr_skip_weight =
			def_cfg.categories[ac_selected].specs.wrr_skip_weight;
		scheduler->categories[i].specs.credit_threshold =
			def_cfg.categories[ac_selected].specs.credit_threshold;
		scheduler->categories[i].specs.send_limit =
			def_cfg.categories[ac_selected].specs.send_limit;
		scheduler->categories[i].specs.credit_reserve =
			def_cfg.categories[ac_selected].specs.credit_reserve;
		scheduler->categories[i].specs.discard_weight =
			def_cfg.categories[ac_selected].specs.discard_weight;
	}
}
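
/*
 * Worked example of the mapping loop above (illustrative EDCA numbers
 * only -- the real OL_TX_AIFS_DEFAULT_* / OL_TX_CW_MIN_DEFAULT_* values
 * are defined elsewhere): if the AP advertises AIFS=2 and CWMin=3 for an
 * AC, its weight is 2 + 3 = 5.  The loop walks the default_edca
 * thresholds from VO downward and picks the first category whose default
 * AIFS+CWMin is >= 5, so an AC the AP treats as aggressively as default
 * VO inherits VO's scheduler specs, while a larger (less aggressive)
 * weight falls through to VI, BE, or ultimately BK.
 */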

/**
 * ol_tx_sched_stats_display() - tx sched stats display
 * @pdev: Pointer to the PDEV structure.
 *
 * Return: none.
 */
void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
{
	OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(pdev->tx_sched.scheduler);
}

/**
 * ol_tx_sched_cur_state_display() - tx sched current state display
 * @pdev: Pointer to the PDEV structure.
 *
 * Return: none.
 */
void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
{
	OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(pdev->tx_sched.scheduler);
}

/**
 * ol_tx_sched_stats_clear() - reset tx sched stats
 * @pdev: Pointer to the PDEV structure.
 *
 * Return: none.
 */
void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
{
	OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(pdev->tx_sched.scheduler);
}

#endif /* OL_TX_SCHED == OL_TX_SCHED_WRR_ADV */

/*--- congestion control discard --------------------------------------------*/

static struct ol_tx_frms_queue_t *
ol_tx_sched_discard_select_txq(
		struct ol_txrx_pdev_t *pdev,
		ol_tx_frms_queue_list *tx_queues)
{
	struct ol_tx_frms_queue_t *txq;
	struct ol_tx_frms_queue_t *selected_txq = NULL;
	int max_frms = 0;

	/* return the tx queue with the most frames */
	TAILQ_FOREACH(txq, tx_queues, list_elem) {
		if (txq->frms > max_frms) {
			max_frms = txq->frms;
			selected_txq = txq;
		}
	}
	return selected_txq;
}

u_int16_t
ol_tx_sched_discard_select(
		struct ol_txrx_pdev_t *pdev,
		u_int16_t frms,
		ol_tx_desc_list *tx_descs,
		bool force)
{
	int cat;
	struct ol_tx_frms_queue_t *txq;
	int bytes;
	u_int32_t credit;
	struct ol_tx_sched_notify_ctx_t notify_ctx;

	/*
	 * first decide what category of traffic (e.g. TID or AC)
	 * to discard next
	 */
	cat = ol_tx_sched_discard_select_category(pdev);

	/* then decide which peer within this category to discard from next */
	txq = ol_tx_sched_discard_select_txq(
			pdev, ol_tx_sched_category_tx_queues(pdev, cat));
	if (!txq)
		/* no more pending tx frames in this category;
		 * exit the discard loop
		 */
		return 0;

	if (force == false) {
		/*
		 * Now decide how many frames to discard from this peer-TID.
		 * Don't discard more frames than the caller has specified.
		 * Don't discard more than a fixed quantum of frames at a time.
		 * Don't discard more than 50% of the queue's frames at a time,
		 * but if there's only 1 frame left, go ahead and discard it.
		 */
#define OL_TX_DISCARD_QUANTUM 10
		if (OL_TX_DISCARD_QUANTUM < frms)
			frms = OL_TX_DISCARD_QUANTUM;

		if (txq->frms > 1 && frms >= (txq->frms >> 1))
			frms = txq->frms >> 1;
	}

	/*
	 * Discard from the head of the queue, because:
	 * 1.  Front-dropping gives applications like TCP that include ARQ
	 *     an early notification of congestion.
	 * 2.  For time-sensitive applications like RTP, the newest frames are
	 *     most relevant.
	 */
	credit = 10000; /* no credit limit */
	frms = ol_tx_dequeue(pdev, txq, tx_descs, frms, &credit, &bytes);

	notify_ctx.event = OL_TX_DISCARD_FRAMES;
	notify_ctx.frames = frms;
	notify_ctx.bytes = bytes;
	notify_ctx.txq = txq;
	notify_ctx.info.ext_tid = cat;
	ol_tx_sched_notify(pdev, &notify_ctx);

	TX_SCHED_DEBUG_PRINT("Tx Drop : %d", frms);
	return frms;
}
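
/*
 * Worked example of the non-forced clamping above (numbers are
 * illustrative): with a caller budget of frms = 25 and a 16-frame queue,
 * the quantum first clamps 25 down to 10, and the half-queue rule then
 * clamps 10 down to 16 >> 1 = 8, so 8 frames are dequeued and dropped.
 * A single-frame queue skips the half-queue rule entirely, so the last
 * remaining frame can still be discarded.
 */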

/*--- scheduler framework ---------------------------------------------------*/

/*
 * The scheduler mutex spinlock has been acquired outside this function,
 * so there is no need to take locks inside this function.
 */
void
ol_tx_sched_notify(
		struct ol_txrx_pdev_t *pdev,
		struct ol_tx_sched_notify_ctx_t *ctx)
{
	struct ol_tx_frms_queue_t *txq = ctx->txq;
	int tid;

	if (!pdev->tx_sched.scheduler)
		return;

	switch (ctx->event) {
	case OL_TX_ENQUEUE_FRAME:
		tid = ctx->info.tx_msdu_info->htt.info.ext_tid;
		ol_tx_sched_txq_enqueue(pdev, txq, tid, 1, ctx->bytes);
		break;
	case OL_TX_DELETE_QUEUE:
		tid = ctx->info.ext_tid;
		if (txq->flag == ol_tx_queue_active)
			ol_tx_sched_txq_deactivate(pdev, txq, tid);
		break;
	case OL_TX_PAUSE_QUEUE:
		tid = ctx->info.ext_tid;
		if (txq->flag == ol_tx_queue_active)
			ol_tx_sched_txq_deactivate(pdev, txq, tid);
		break;
	case OL_TX_UNPAUSE_QUEUE:
		tid = ctx->info.ext_tid;
		if (txq->frms != 0)
			ol_tx_sched_txq_enqueue(pdev, txq, tid,
						txq->frms, txq->bytes);
		break;
	case OL_TX_DISCARD_FRAMES:
		/* not necessarily TID, could be category */
		tid = ctx->info.ext_tid;
		ol_tx_sched_txq_discard(pdev, txq, tid,
					ctx->frames, ctx->bytes);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Error: unknown sched notification (%d)\n",
			  ctx->event);
		qdf_assert(0);
		break;
	}
}

#define OL_TX_MSDU_ID_STORAGE_ERR(ptr) (!ptr)

static void
ol_tx_sched_dispatch(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_sched_ctx *sctx)
{
	qdf_nbuf_t msdu, prev = NULL, head_msdu = NULL;
	struct ol_tx_desc_t *tx_desc;
	u_int16_t *msdu_id_storage;
	u_int16_t msdu_id;
	int num_msdus = 0;

	TX_SCHED_DEBUG_PRINT("Enter");
	while (sctx->frms) {
		tx_desc = TAILQ_FIRST(&sctx->head);
		if (!tx_desc) {
			/* TODO: find its reason */
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s: error: not enough tx_desc on sctx->head.\n",
				  __func__);
			break;
		}
		msdu = tx_desc->netbuf;
		TAILQ_REMOVE(&sctx->head, tx_desc, tx_desc_list_elem);
		if (!head_msdu)
			head_msdu = msdu;

		if (prev)
			qdf_nbuf_set_next(prev, msdu);

		prev = msdu;

#ifndef ATH_11AC_TXCOMPACT
		/*
		 * When the tx frame is downloaded to the target, there are two
		 * outstanding references:
		 * 1.  The host download SW (HTT, HTC, HIF)
		 *     This reference is cleared by the ol_tx_send_done callback
		 *     functions.
		 * 2.  The target FW
		 *     This reference is cleared by the ol_tx_completion_handler
		 *     function.
		 * It is extremely probable that the download completion is
		 * processed before the tx completion message.  However, under
		 * exceptional conditions the tx completion may be processed
		 * first.  Thus, rather than assuming that reference (1) is
		 * done before reference (2), explicit reference tracking is
		 * needed.
		 * Double-increment the ref count to account for both references
		 * described above.
		 */
		qdf_atomic_init(&tx_desc->ref_cnt);
		qdf_atomic_inc(&tx_desc->ref_cnt);
		qdf_atomic_inc(&tx_desc->ref_cnt);
#endif

		/* store the MSDU ID for each MSDU */
		msdu_id = ol_tx_desc_id(pdev, tx_desc);
		msdu_id_storage = ol_tx_msdu_id_storage(msdu);
		if (OL_TX_MSDU_ID_STORAGE_ERR(msdu_id_storage)) {
			/*
			 * Send the prior frames as a batch, then send this
			 * one individually, then resume handling the
			 * remaining frames.
			 */
			if (head_msdu)
				ol_tx_send_batch(pdev, head_msdu, num_msdus);

			prev = NULL;
			head_msdu = NULL;
			num_msdus = 0;

			if (htt_tx_send_std(pdev->htt_pdev, msdu, msdu_id)) {
				ol_tx_target_credit_incr(pdev, msdu);
				ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
							     1 /* error */);
			}
		} else {
			*msdu_id_storage = msdu_id;
			num_msdus++;
		}
		sctx->frms--;
	}

	/* send the batch of frames */
	if (head_msdu)
		ol_tx_send_batch(pdev, head_msdu, num_msdus);
	TX_SCHED_DEBUG_PRINT("Leave");
}
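
/*
 * Sketch of the two-reference lifecycle set up in the dispatch loop above
 * (assuming the non-ATH_11AC_TXCOMPACT build, where ref_cnt is compiled
 * in): after the double increment, ref_cnt == 2.  The download-completion
 * path (ol_tx_send_done) drops one reference and the FW-completion path
 * (ol_tx_completion_handler) drops the other; only the drop that takes
 * ref_cnt to 0 may free the descriptor, so the descriptor survives
 * whichever of the two completions happens to arrive first.
 */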

#ifdef QCA_TX_PADDING_CREDIT_SUPPORT
static void replenish_tx_pad_credit(struct ol_txrx_pdev_t *pdev)
{
	int replenish_credit = 0, avail_targ_tx_credit = 0;
	int cur_tx_pad_credit = 0, grp_credit = 0, i = 0;
	qdf_atomic_t *tx_grp_credit = NULL;

	cur_tx_pad_credit = qdf_atomic_read(&pdev->pad_reserve_tx_credit);
	if (cur_tx_pad_credit < MIN_TX_PAD_CREDIT_THRESH) {
		replenish_credit = MAX_TX_PAD_CREDIT_THRESH - cur_tx_pad_credit;
		avail_targ_tx_credit = qdf_atomic_read(&pdev->target_tx_credit);
		replenish_credit = (replenish_credit < avail_targ_tx_credit) ?
				   replenish_credit : avail_targ_tx_credit;
		if (replenish_credit < 0) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
				  "Tx Pad Credits = %d Target Tx Credits = %d",
				  cur_tx_pad_credit,
				  avail_targ_tx_credit);
			qdf_assert(0);
		}
		qdf_atomic_add(replenish_credit, &pdev->pad_reserve_tx_credit);
		qdf_atomic_add(-replenish_credit, &pdev->target_tx_credit);

		while (replenish_credit > 0) {
			for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
				tx_grp_credit = &pdev->txq_grps[i].credit;
				grp_credit = qdf_atomic_read(tx_grp_credit);
				if (grp_credit) {
					qdf_atomic_add(-1, tx_grp_credit);
					replenish_credit--;
				}
				if (!replenish_credit)
					break;
			}
		}
	}
}
#else
static void replenish_tx_pad_credit(struct ol_txrx_pdev_t *pdev)
{
}
#endif
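
/*
 * Worked example of the replenish arithmetic above, using hypothetical
 * thresholds MIN_TX_PAD_CREDIT_THRESH = 4 and MAX_TX_PAD_CREDIT_THRESH =
 * 16 (the real values are defined elsewhere): if the pad pool has
 * dropped to 2 credits while the target pool holds 50, the pad pool is
 * topped up by min(16 - 2, 50) = 14 credits, the target pool is debited
 * the same 14, and the trailing loop then debits those 14 credits
 * round-robin from the per-group counters so the group accounting stays
 * consistent with the global pool.
 */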

void
ol_tx_sched(struct ol_txrx_pdev_t *pdev)
{
	struct ol_tx_sched_ctx sctx;
	u_int32_t credit;

	TX_SCHED_DEBUG_PRINT("Enter");
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	if (pdev->tx_sched.tx_sched_status != ol_tx_scheduler_idle) {
		qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
		return;
	}
	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_running;

	ol_tx_sched_log(pdev);
	/*
	 * adf_os_print("BEFORE tx sched:\n");
	 * ol_tx_queues_display(pdev);
	 */
	replenish_tx_pad_credit(pdev);
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	TAILQ_INIT(&sctx.head);
	sctx.frms = 0;

	ol_tx_sched_select_init(pdev);
	while (qdf_atomic_read(&pdev->target_tx_credit) > 0) {
		int num_credits;

		qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
		replenish_tx_pad_credit(pdev);
		credit = qdf_atomic_read(&pdev->target_tx_credit);
		num_credits = ol_tx_sched_select_batch(pdev, &sctx, credit);
		if (num_credits > 0) {
#if DEBUG_HTT_CREDIT
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  " <HTT> Decrease credit %d - %d = %d.\n",
				  qdf_atomic_read(&pdev->target_tx_credit),
				  num_credits,
				  qdf_atomic_read(&pdev->target_tx_credit) -
				  num_credits);
#endif
			DPTRACE(qdf_dp_trace_credit_record(QDF_TX_SCHED,
				QDF_CREDIT_DEC, num_credits,
				qdf_atomic_read(&pdev->target_tx_credit) -
				num_credits,
				qdf_atomic_read(&pdev->txq_grps[0].credit),
				qdf_atomic_read(&pdev->txq_grps[1].credit)));

			qdf_atomic_add(-num_credits, &pdev->target_tx_credit);
		}
		qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

		if (num_credits == 0)
			break;
	}
	ol_tx_sched_dispatch(pdev, &sctx);

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	/*
	 * adf_os_print("AFTER tx sched:\n");
	 * ol_tx_queues_display(pdev);
	 */

	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	TX_SCHED_DEBUG_PRINT("Leave");
}
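
/*
 * Note on the structure of ol_tx_sched() above: the tx_sched_status flag
 * acts as a non-blocking reentrancy guard, so a second caller (e.g. from
 * a tx-completion context) simply returns while the first invocation is
 * still draining queues.  Frames are only selected (and credit deducted)
 * under tx_queue_spinlock; the actual dispatch to HTT happens after the
 * lock is dropped, keeping the download path out of the spinlock's
 * critical section.
 */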

void *
ol_tx_sched_attach(
	struct ol_txrx_pdev_t *pdev)
{
	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
	return ol_tx_sched_init(pdev);
}

void
ol_tx_sched_detach(
	struct ol_txrx_pdev_t *pdev)
{
	if (pdev->tx_sched.scheduler) {
		qdf_mem_free(pdev->tx_sched.scheduler);
		pdev->tx_sched.scheduler = NULL;
	}
}

/*--- debug functions -------------------------------------------------------*/

#if defined(DEBUG_HL_LOGGING)

static void
ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
{
	u_int8_t *buf;
	u_int32_t *active_bitmap;
	int i, j, num_cats_active;
	int active, frms, bytes;
	int credit;

	/* don't bother recording state if credit is zero */
	credit = qdf_atomic_read(&pdev->target_tx_credit);
	if (credit == 0)
		return;

	/*
	 * See how many TIDs are active, so queue state can be stored only
	 * for those TIDs.
	 * Do an initial iteration through all categories to see if any
	 * are active.  Doing an extra iteration is inefficient, but
	 * efficiency is not a dominant concern when logging is enabled.
	 */
	num_cats_active = 0;
	for (i = 0; i < OL_TX_SCHED_NUM_CATEGORIES; i++) {
		ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
		if (active)
			num_cats_active++;
	}
	/* don't bother recording state if there are no active queues */
	if (num_cats_active == 0)
		return;

	ol_tx_queue_log_sched(pdev, credit, &num_cats_active,
			      &active_bitmap, &buf);

	if (num_cats_active == 0)
		return;

	*active_bitmap = 0;
	for (i = 0, j = 0;
			i < OL_TX_SCHED_NUM_CATEGORIES && j < num_cats_active;
			i++) {
		u_int8_t *p;

		ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
		if (!active)
			continue;

		p = &buf[j * 6];
		p[0] = (frms >> 0) & 0xff;
		p[1] = (frms >> 8) & 0xff;

		p[2] = (bytes >> 0) & 0xff;
		p[3] = (bytes >> 8) & 0xff;
		p[4] = (bytes >> 16) & 0xff;
		p[5] = (bytes >> 24) & 0xff;
		j++;
		*active_bitmap |= 1 << i;
	}
}
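
/*
 * Layout of each 6-byte record packed above (little-endian, one record
 * per active category in ascending category order, with *active_bitmap
 * indicating which categories the records belong to):
 *
 *   p[0..1]  frame count  (16 bits)
 *   p[2..5]  byte count   (32 bits)
 *
 * A sketch of how a log reader might unpack record j, assuming it has
 * already located buf and the bitmap in the log stream:
 *
 *   u_int8_t *p = &buf[j * 6];
 *   int frms  = p[0] | (p[1] << 8);
 *   int bytes = p[2] | (p[3] << 8) | (p[4] << 16) | (p[5] << 24);
 */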

#endif /* defined(DEBUG_HL_LOGGING) */

#endif /* defined(CONFIG_HL_SUPPORT) */