/*
 * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_atomic.h>         /* qdf_atomic_read, etc. */
#include <ol_cfg.h>             /* ol_cfg_addba_retry */
#include <htt.h>                /* HTT_TX_EXT_TID_MGMT */
#include <ol_htt_tx_api.h>      /* htt_tx_desc_tid */
#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle */
#include <ol_txrx_ctrl_api.h>   /* ol_txrx_sync, ol_tx_addba_conf */
#include <cdp_txrx_tx_throttle.h>
#include <ol_ctrl_txrx_api.h>   /* ol_ctrl_addba_req */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT1, etc. */
#include <ol_tx_desc.h>         /* ol_tx_desc, ol_tx_desc_frame_list_free */
#include <ol_tx.h>              /* ol_tx_vdev_ll_pause_queue_send */
#include <ol_tx_sched.h>        /* ol_tx_sched_notify, etc. */
#include <ol_tx_queue.h>
#include <ol_txrx.h>            /* ol_tx_desc_pool_size_hl */
#include <ol_txrx_dbg.h>        /* ENABLE_TX_QUEUE_LOG */
#include <qdf_types.h>          /* bool */
#include "cdp_txrx_flow_ctrl_legacy.h"
#include <ol_txrx_peer_find.h>
#include <cdp_txrx_handle.h>
#if defined(CONFIG_HL_SUPPORT)

#ifndef offsetof
#define offsetof(type, field)   ((qdf_size_t)(&((type *)0)->field))
#endif

/*--- function prototypes for optional host ADDBA negotiation ---------------*/

#define OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info) /* no-op */

#ifndef container_of
#define container_of(ptr, type, member) ((type *)( \
			(char *)(ptr) - (char *)(&((type *)0)->member)))
#endif
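
/*
 * Illustrative example (not part of the driver): container_of() recovers
 * a pointer to an enclosing structure from a pointer to one of its
 * members. With a hypothetical wrapper around a tx frames queue:
 *
 *     struct example_wrapper {
 *         int id;
 *         struct ol_tx_frms_queue_t txq;
 *     };
 *
 *     struct ol_tx_frms_queue_t *q = ...;
 *     struct example_wrapper *w =
 *             container_of(q, struct example_wrapper, txq);
 */
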
/*--- function definitions --------------------------------------------------*/

/**
 * ol_tx_queue_vdev_flush() - flush the vdev's pending tx frames, whether or
 *			      not they are queued in the TX scheduler
 * @pdev: the physical device object
 * @vdev: the virtual device object
 *
 * Return: None
 */
static void
ol_tx_queue_vdev_flush(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
{
#define PEER_ARRAY_COUNT        10
	struct ol_tx_frms_queue_t *txq;
	struct ol_txrx_peer_t *peer, *peers[PEER_ARRAY_COUNT];
	int i, j, peer_count;

	ol_tx_hl_queue_flush_all(vdev);

	/* flush VDEV TX queues */
	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		txq = &vdev->txqs[i];
		/*
		 * currently txqs of MCAST_BCAST/DEFAULT_MGMT packet are using
		 * tid HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST/HTT_TX_EXT_TID_MGMT
		 * when inserted into scheduler, so use same tid when we flush
		 * them
		 */
		if (i == OL_TX_VDEV_MCAST_BCAST)
			ol_tx_queue_free(pdev,
					txq,
					HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST,
					false);
		else if (i == OL_TX_VDEV_DEFAULT_MGMT)
			ol_tx_queue_free(pdev,
					txq,
					HTT_TX_EXT_TID_MGMT,
					false);
		else
			ol_tx_queue_free(pdev,
					txq,
					(i + OL_TX_NUM_TIDS),
					false);
	}
	/* flush PEER TX queues */
	do {
		peer_count = 0;
		/* select candidate peers */
		qdf_spin_lock_bh(&pdev->peer_ref_mutex);
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			for (i = 0; i < OL_TX_NUM_TIDS; i++) {
				txq = &peer->txqs[i];
				if (txq->frms) {
					ol_txrx_peer_get_ref
						(peer,
						 PEER_DEBUG_ID_OL_TXQ_VDEV_FL);
					peers[peer_count++] = peer;
					break;
				}
			}
			if (peer_count >= PEER_ARRAY_COUNT)
				break;
		}
		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
		/* flush TX queues of candidate peers */
		for (i = 0; i < peer_count; i++) {
			for (j = 0; j < OL_TX_NUM_TIDS; j++) {
				txq = &peers[i]->txqs[j];
				if (txq->frms)
					ol_tx_queue_free(pdev, txq, j, true);
			}
			/* use peers[i]: the TAILQ_FOREACH cursor is stale */
			ol_txrx_info("Delete Peer %pK", peers[i]);
			ol_txrx_peer_release_ref(peers[i],
						 PEER_DEBUG_ID_OL_TXQ_VDEV_FL);
		}
	} while (peer_count >= PEER_ARRAY_COUNT);
}

/**
 * ol_tx_queue_flush() - flush pending tx frames in every vdev's queues,
 *			 whether or not they are queued in the TX scheduler
 * @pdev: the physical device object
 *
 * Return: None
 */
static inline void
ol_tx_queue_flush(struct ol_txrx_pdev_t *pdev)
{
	struct ol_txrx_vdev_t *vdev;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		ol_tx_queue_vdev_flush(pdev, vdev);
	}
}

void
ol_tx_queue_discard(
	struct ol_txrx_pdev_t *pdev,
	bool flush_all,
	ol_tx_desc_list *tx_descs)
{
	u_int16_t num;
	u_int16_t discarded, actual_discarded = 0;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);

	if (flush_all)
		/* flush all the pending tx queues in the scheduler */
		num = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) -
			qdf_atomic_read(&pdev->tx_queue.rsrc_cnt);
	else
		/* TODO: discard frames for a particular vdev only */
		num = pdev->tx_queue.rsrc_threshold_hi -
			pdev->tx_queue.rsrc_threshold_lo;

	TX_SCHED_DEBUG_PRINT("+%u", qdf_atomic_read(&pdev->tx_queue.rsrc_cnt));
	while (num > 0) {
		discarded = ol_tx_sched_discard_select(
				pdev, (u_int16_t)num, tx_descs, flush_all);
		if (discarded == 0)
			/*
			 * No more packets could be discarded.
			 * Probably tx queues are empty.
			 */
			break;

		num -= discarded;
		actual_discarded += discarded;
	}
	qdf_atomic_add(actual_discarded, &pdev->tx_queue.rsrc_cnt);
	TX_SCHED_DEBUG_PRINT("-");

	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);

	if (flush_all && num > 0)
		/*
		 * try to flush pending frames in the tx queues
		 * which are not queued in the TX scheduler.
		 */
		ol_tx_queue_flush(pdev);
}
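
/*
 * Usage sketch (illustrative; mirrors the call pattern in ol_tx_enqueue()
 * below): collect the discarded descriptors on a local list, then free
 * them outside the tx_queue_spinlock; the final argument of 1 marks the
 * frames as freed due to an error condition:
 *
 *     ol_tx_desc_list tx_descs;
 *
 *     TAILQ_INIT(&tx_descs);
 *     ol_tx_queue_discard(pdev, false, &tx_descs);
 *     ol_tx_desc_frame_list_free(pdev, &tx_descs, 1);
 */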

#ifdef QCA_HL_NETDEV_FLOW_CONTROL

/**
 * is_ol_tx_discard_frames_success() - check whether currently queued tx frames
 *				       should be discarded
 * @pdev: the physical device object
 * @tx_desc: tx descriptor ptr
 *
 * Return: true if too few tx descriptors are available and queued frames
 *	   should be discarded
 */
static inline bool
is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
				struct ol_tx_desc_t *tx_desc)
{
	ol_txrx_vdev_handle vdev;
	bool discard_frames;

	vdev = tx_desc->vdev;

	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
	if (vdev->tx_desc_limit == 0) {
		/* Flow control not enabled */
		discard_frames = qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) <=
					pdev->tx_queue.rsrc_threshold_lo;
	} else {
		/*
		 * Discard if the netbuf is normal priority and tx_desc_count
		 * exceeds the queue stop threshold, OR if the netbuf is high
		 * priority and tx_desc_count exceeds the tx desc limit.
		 */
		discard_frames = (!ol_tx_desc_is_high_prio(tx_desc->netbuf) &&
				  qdf_atomic_read(&vdev->tx_desc_count) >
				  vdev->queue_stop_th) ||
				  (ol_tx_desc_is_high_prio(tx_desc->netbuf) &&
				  qdf_atomic_read(&vdev->tx_desc_count) >
				  vdev->tx_desc_limit);
	}
	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);

	return discard_frames;
}
#else

static inline bool
is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
				struct ol_tx_desc_t *tx_desc)
{
	return qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) <=
				pdev->tx_queue.rsrc_threshold_lo;
}
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

void
ol_tx_enqueue(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	struct ol_tx_desc_t *tx_desc,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	int bytes;
	struct ol_tx_sched_notify_ctx_t notify_ctx;

	TX_SCHED_DEBUG_PRINT("Enter");

	/*
	 * If too few tx descriptors are available, drop some currently-queued
	 * tx frames, to provide enough tx descriptors for new frames, which
	 * may be higher priority than the current frames.
	 */
	if (is_ol_tx_discard_frames_success(pdev, tx_desc)) {
		ol_tx_desc_list tx_descs;

		TAILQ_INIT(&tx_descs);
		ol_tx_queue_discard(pdev, false, &tx_descs);
		/* discard the frames collected on the discard list */
		ol_tx_desc_frame_list_free(pdev, &tx_descs, 1 /* error */);
	}

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	TAILQ_INSERT_TAIL(&txq->head, tx_desc, tx_desc_list_elem);

	bytes = qdf_nbuf_len(tx_desc->netbuf);
	txq->frms++;
	txq->bytes += bytes;
	ol_tx_update_grp_frm_count(txq, 1);
	ol_tx_queue_log_enqueue(pdev, tx_msdu_info, 1, bytes);

	if (txq->flag != ol_tx_queue_paused) {
		notify_ctx.event = OL_TX_ENQUEUE_FRAME;
		notify_ctx.frames = 1;
		notify_ctx.bytes = qdf_nbuf_len(tx_desc->netbuf);
		notify_ctx.txq = txq;
		notify_ctx.info.tx_msdu_info = tx_msdu_info;
		ol_tx_sched_notify(pdev, &notify_ctx);
		txq->flag = ol_tx_queue_active;
	}

	if (!ETHERTYPE_IS_EAPOL_WAPI(tx_msdu_info->htt.info.ethertype))
		OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info);

	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	TX_SCHED_DEBUG_PRINT("Leave");
}

u_int16_t
ol_tx_dequeue(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	ol_tx_desc_list *head,
	u_int16_t max_frames,
	u_int32_t *credit,
	int *bytes)
{
	u_int16_t num_frames;
	int bytes_sum;
	unsigned int credit_sum;

	TXRX_ASSERT2(txq->flag != ol_tx_queue_paused);
	TX_SCHED_DEBUG_PRINT("Enter");

	if (txq->frms < max_frames)
		max_frames = txq->frms;

	bytes_sum = 0;
	credit_sum = 0;
	for (num_frames = 0; num_frames < max_frames; num_frames++) {
		unsigned int frame_credit;
		struct ol_tx_desc_t *tx_desc;

		tx_desc = TAILQ_FIRST(&txq->head);

		frame_credit = htt_tx_msdu_credit(tx_desc->netbuf);
		if (credit_sum + frame_credit > *credit)
			break;

		credit_sum += frame_credit;
		bytes_sum += qdf_nbuf_len(tx_desc->netbuf);
		TAILQ_REMOVE(&txq->head, tx_desc, tx_desc_list_elem);
		TAILQ_INSERT_TAIL(head, tx_desc, tx_desc_list_elem);
	}
	txq->frms -= num_frames;
	txq->bytes -= bytes_sum;
	/* the group counters track frames, not credits */
	ol_tx_update_grp_frm_count(txq, -num_frames);

	/* a paused queue remains paused, regardless of whether it has frames */
	if (txq->frms == 0 && txq->flag == ol_tx_queue_active)
		txq->flag = ol_tx_queue_empty;

	ol_tx_queue_log_dequeue(pdev, txq, num_frames, bytes_sum);
	TX_SCHED_DEBUG_PRINT("Leave");

	*bytes = bytes_sum;
	*credit = credit_sum;
	return num_frames;
}
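
/*
 * Usage sketch (illustrative only; an assumed scheduler-side caller):
 * pull up to max_frames from a txq, bounded by the download credit
 * currently held. On return, *credit holds the credit actually consumed
 * and *bytes the total payload length of the dequeued frames:
 *
 *     ol_tx_desc_list head;
 *     u_int32_t credit = avail_credit;    // assumed local variable
 *     int bytes;
 *     u_int16_t n;
 *
 *     TAILQ_INIT(&head);
 *     n = ol_tx_dequeue(pdev, txq, &head, max_frames, &credit, &bytes);
 */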

void
ol_tx_queue_free(
	struct ol_txrx_pdev_t *pdev,
	struct ol_tx_frms_queue_t *txq,
	int tid, bool is_peer_txq)
{
	int frms = 0, bytes = 0;
	struct ol_tx_desc_t *tx_desc;
	struct ol_tx_sched_notify_ctx_t notify_ctx;
	ol_tx_desc_list tx_tmp_list;

	TAILQ_INIT(&tx_tmp_list);
	TX_SCHED_DEBUG_PRINT("Enter");
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);

	notify_ctx.event = OL_TX_DELETE_QUEUE;
	notify_ctx.txq = txq;
	notify_ctx.info.ext_tid = tid;
	ol_tx_sched_notify(pdev, &notify_ctx);

	frms = txq->frms;
	tx_desc = TAILQ_FIRST(&txq->head);
	while (txq->frms) {
		bytes += qdf_nbuf_len(tx_desc->netbuf);
		txq->frms--;
		tx_desc = TAILQ_NEXT(tx_desc, tx_desc_list_elem);
	}
	ol_tx_queue_log_free(pdev, txq, tid, frms, bytes, is_peer_txq);
	txq->bytes -= bytes;
	txq->flag = ol_tx_queue_empty;
	/* txq->head gets reset during the TAILQ_CONCAT call */
	TAILQ_CONCAT(&tx_tmp_list, &txq->head, tx_desc_list_elem);
	ol_tx_update_grp_frm_count(txq, -frms);
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	/* free tx frames without holding tx_queue_spinlock */
	qdf_atomic_add(frms, &pdev->tx_queue.rsrc_cnt);
	while (frms) {
		tx_desc = TAILQ_FIRST(&tx_tmp_list);
		TAILQ_REMOVE(&tx_tmp_list, tx_desc, tx_desc_list_elem);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 0);
		frms--;
	}
	TX_SCHED_DEBUG_PRINT("Leave");
}


/*--- queue pause / unpause functions ---------------------------------------*/

/**
 * ol_txrx_peer_tid_pause_base() - suspend/pause the txq for a given tid of
 *				   a given peer
 * @pdev: the physical device object
 * @peer: peer device object
 * @tid: tid for which the queue needs to be paused
 *
 * Return: None
 */
static void
ol_txrx_peer_tid_pause_base(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	int tid)
{
	struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];

	if (txq->paused_count.total++ == 0) {
		struct ol_tx_sched_notify_ctx_t notify_ctx;

		notify_ctx.event = OL_TX_PAUSE_QUEUE;
		notify_ctx.txq = txq;
		notify_ctx.info.ext_tid = tid;
		ol_tx_sched_notify(pdev, &notify_ctx);
		txq->flag = ol_tx_queue_paused;
	}
}
#ifdef QCA_BAD_PEER_TX_FLOW_CL

/**
 * ol_txrx_peer_pause_but_no_mgmt_q_base() - suspend/pause all txqs except
 *					     management queue for a given peer
 * @pdev: the physical device object
 * @peer: peer device object
 *
 * Return: None
 */
static void
ol_txrx_peer_pause_but_no_mgmt_q_base(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer)
{
	int i;

	for (i = 0; i < OL_TX_MGMT_TID; i++)
		ol_txrx_peer_tid_pause_base(pdev, peer, i);
}
#endif


/**
 * ol_txrx_peer_pause_base() - suspend/pause all txqs for a given peer
 * @pdev: the physical device object
 * @peer: peer device object
 *
 * Return: None
 */
static void
ol_txrx_peer_pause_base(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer)
{
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
		ol_txrx_peer_tid_pause_base(pdev, peer, i);
}

/**
 * ol_txrx_peer_tid_unpause_base() - unpause the txq for a given tid of a
 *				     given peer
 * @pdev: the physical device object
 * @peer: peer device object
 * @tid: tid for which the queue needs to be unpaused
 *
 * Return: None
 */
static void
ol_txrx_peer_tid_unpause_base(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	int tid)
{
	struct ol_tx_frms_queue_t *txq = &peer->txqs[tid];
	/*
	 * Don't actually unpause the tx queue until all pause requests
	 * have been removed.
	 */
	TXRX_ASSERT2(txq->paused_count.total > 0);
	/* defensive check: bail out if the queue is not currently paused */
	if (txq->paused_count.total == 0)
		return;

	if (--txq->paused_count.total == 0) {
		struct ol_tx_sched_notify_ctx_t notify_ctx;

		notify_ctx.event = OL_TX_UNPAUSE_QUEUE;
		notify_ctx.txq = txq;
		notify_ctx.info.ext_tid = tid;
		ol_tx_sched_notify(pdev, &notify_ctx);

		if (txq->frms == 0) {
			txq->flag = ol_tx_queue_empty;
		} else {
			txq->flag = ol_tx_queue_active;
			/*
			 * Now that there are new tx frames available to
			 * download, invoke the scheduling function, to see
			 * if it wants to download the new frames.
			 * Since the queue lock is currently held, and since
			 * the scheduler function takes the lock, temporarily
			 * release the lock.
			 */
			qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
			ol_tx_sched(pdev);
			qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
		}
	}
}
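
/*
 * Pause requests are reference-counted per txq (paused_count.total), so
 * pause and unpause calls must balance. Illustrative sequence, assuming
 * the tx_queue_spinlock is held as in the callers below:
 *
 *     ol_txrx_peer_tid_pause_base(pdev, peer, tid);    // 0 -> 1: paused
 *     ol_txrx_peer_tid_pause_base(pdev, peer, tid);    // 1 -> 2: still paused
 *     ol_txrx_peer_tid_unpause_base(pdev, peer, tid);  // 2 -> 1: still paused
 *     ol_txrx_peer_tid_unpause_base(pdev, peer, tid);  // 1 -> 0: resumes
 */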

/**
 * ol_txrx_peer_unpause_base() - unpause all txqs for a given peer
 * @pdev: the physical device object
 * @peer: peer device object
 *
 * Return: None
 */
static void
ol_txrx_peer_unpause_base(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer)
{
	int i;

	for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
		ol_txrx_peer_tid_unpause_base(pdev, peer, i);
}

#ifdef QCA_BAD_PEER_TX_FLOW_CL
/**
 * ol_txrx_peer_unpause_but_no_mgmt_q_base() - unpause all txqs except
 *					       management queue for a given peer
 * @pdev: the physical device object
 * @peer: peer device object
 *
 * Return: None
 */
static void
ol_txrx_peer_unpause_but_no_mgmt_q_base(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer)
{
	int i;

	for (i = 0; i < OL_TX_MGMT_TID; i++)
		ol_txrx_peer_tid_unpause_base(pdev, peer, i);
}
#endif

void
ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;

	/* TO DO: log the queue unpause */

	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter");
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);

	if (tid == -1) {
		int i;

		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
			ol_txrx_peer_tid_unpause_base(pdev, peer, i);

	} else {
		ol_txrx_peer_tid_unpause_base(pdev, peer, tid);
	}

	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	TX_SCHED_DEBUG_PRINT("Leave");
}

void
ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		   uint32_t reason, uint32_t pause_type)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
	struct ol_txrx_pdev_t *pdev;
	struct ol_txrx_peer_t *peer;
	/* TO DO: log the queue pause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter");

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is NULL");
		return;
	}

	pdev = vdev->pdev;

	/* use peer_ref_mutex before accessing peer_list */
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (pause_type == PAUSE_TYPE_CHOP) {
			if (!(peer->is_tdls_peer && peer->tdls_offchan_enabled))
				ol_txrx_peer_pause_base(pdev, peer);
		} else if (pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
			if (peer->is_tdls_peer && peer->tdls_offchan_enabled)
				ol_txrx_peer_pause_base(pdev, peer);
		} else {
			ol_txrx_peer_pause_base(pdev, peer);
		}
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	TX_SCHED_DEBUG_PRINT("Leave");
}

void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  uint32_t reason, uint32_t pause_type)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
	struct ol_txrx_pdev_t *pdev;
	struct ol_txrx_peer_t *peer;

	/* TO DO: log the queue unpause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter");

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is NULL");
		return;
	}

	pdev = vdev->pdev;

	/* take peer_ref_mutex before accessing peer_list */
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (pause_type == PAUSE_TYPE_CHOP) {
			if (!(peer->is_tdls_peer && peer->tdls_offchan_enabled))
				ol_txrx_peer_unpause_base(pdev, peer);
		} else if (pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
			if (peer->is_tdls_peer && peer->tdls_offchan_enabled)
				ol_txrx_peer_unpause_base(pdev, peer);
		} else {
			ol_txrx_peer_unpause_base(pdev, peer);
		}
	}
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	TX_SCHED_DEBUG_PRINT("Leave");
}

void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	struct ol_txrx_vdev_t *vdev =
		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is NULL");
		return;
	}

	ol_tx_queue_vdev_flush(vdev->pdev, vdev);
}

#ifdef QCA_BAD_PEER_TX_FLOW_CL

/**
 * ol_txrx_peer_bal_add_limit_peer() - add one peer into the limit list
 * @pdev:	Pointer to PDEV structure.
 * @peer_id:	Peer Identifier.
 * @peer_limit:	Peer limit threshold
 *
 * Add one peer into the limit list of the pdev.
 * Note that the peer limit info will also be updated.
 * If it is the first peer, start the timer.
 *
 * Return: None
 */
void
ol_txrx_peer_bal_add_limit_peer(struct ol_txrx_pdev_t *pdev,
				u_int16_t peer_id, u_int16_t peer_limit)
{
	u_int16_t i, existed = 0;
	struct ol_txrx_peer_t *peer = NULL;

	for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
		if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
			existed = 1;
			break;
		}
	}

	if (!existed) {
		u_int32_t peer_num = pdev->tx_peer_bal.peer_num;
		/* Check if peer_num has reached capacity */
		if (peer_num >= MAX_NO_PEERS_IN_LIMIT) {
			TX_SCHED_DEBUG_PRINT_ALWAYS(
				"reach the maximum peer num %d", peer_num);
			return;
		}
		pdev->tx_peer_bal.limit_list[peer_num].peer_id = peer_id;
		pdev->tx_peer_bal.limit_list[peer_num].limit_flag = true;
		pdev->tx_peer_bal.limit_list[peer_num].limit = peer_limit;
		pdev->tx_peer_bal.peer_num++;

		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		if (peer) {
			peer->tx_limit_flag = true;
			peer->tx_limit = peer_limit;
		}

		TX_SCHED_DEBUG_PRINT_ALWAYS(
			"Add one peer into limit queue, peer_id %d, cur peer num %d",
			peer_id,
			pdev->tx_peer_bal.peer_num);
	}

	/* Only start the timer once */
	if (pdev->tx_peer_bal.peer_bal_timer_state ==
					ol_tx_peer_bal_timer_inactive) {
		qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
					pdev->tx_peer_bal.peer_bal_period_ms);
		pdev->tx_peer_bal.peer_bal_timer_state =
				ol_tx_peer_bal_timer_active;
	}
}

/**
 * ol_txrx_peer_bal_remove_limit_peer() - remove one peer from the limit list
 * @pdev:	Pointer to PDEV structure.
 * @peer_id:	Peer Identifier.
 *
 * Remove one peer from the limit list of the pdev.
 * Note that the timer is only stopped if no peers remain in the limit state.
 *
 * Return: None
 */
void
ol_txrx_peer_bal_remove_limit_peer(struct ol_txrx_pdev_t *pdev,
				   u_int16_t peer_id)
{
	u_int16_t i;
	struct ol_txrx_peer_t *peer = NULL;

	for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
		if (pdev->tx_peer_bal.limit_list[i].peer_id == peer_id) {
			pdev->tx_peer_bal.limit_list[i] =
				pdev->tx_peer_bal.limit_list[
					pdev->tx_peer_bal.peer_num - 1];
			pdev->tx_peer_bal.peer_num--;

			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
			if (peer)
				peer->tx_limit_flag = false;

			TX_SCHED_DEBUG_PRINT(
				"Remove one peer from limitq, peer_id %d, cur peer num %d",
				peer_id,
				pdev->tx_peer_bal.peer_num);
			break;
		}
	}

	/* Only stop the timer if no peer is in the limit state */
	if (pdev->tx_peer_bal.peer_num == 0) {
		qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
		pdev->tx_peer_bal.peer_bal_timer_state =
				ol_tx_peer_bal_timer_inactive;
	}
}
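
/*
 * Lifecycle sketch (illustrative): the first peer added to the limit
 * list starts the balance timer, and removing the last peer stops it;
 * in between, ol_tx_pdev_peer_bal_timer() reinstalls each listed peer's
 * tx limit every peer_bal_period_ms:
 *
 *     ol_txrx_peer_bal_add_limit_peer(pdev, peer_id, limit);  // timer starts
 *     ...
 *     ol_txrx_peer_bal_remove_limit_peer(pdev, peer_id);      // timer stops
 */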

void
ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;

	/* TO DO: log the queue pause */

	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter");
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);

	ol_txrx_peer_pause_but_no_mgmt_q_base(pdev, peer);

	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	TX_SCHED_DEBUG_PRINT("Leave");
}

void
ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
{
	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;

	/* TO DO: log the queue unpause */

	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter");
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);

	ol_txrx_peer_unpause_but_no_mgmt_q_base(pdev, peer);

	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
	TX_SCHED_DEBUG_PRINT("Leave");
}

u_int16_t
ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
			     u_int16_t max_frames,
			     u_int16_t *tx_limit_flag)
{
	if (txq && (txq->peer) && (txq->peer->tx_limit_flag) &&
	    (txq->peer->tx_limit < max_frames)) {
		TX_SCHED_DEBUG_PRINT(
			"Peer ID %d goes to limit, threshold is %d",
			txq->peer->peer_ids[0], txq->peer->tx_limit);
		*tx_limit_flag = 1;
		return txq->peer->tx_limit;
	} else {
		return max_frames;
	}
}

void
ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
			       struct ol_tx_frms_queue_t *txq,
			       u_int16_t frames,
			       u_int16_t tx_limit_flag)
{
	if (unlikely(!pdev)) {
		TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler");
		return;
	}

	if (unlikely(!txq)) {
		TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL txq");
		return;
	}

	qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
	if (tx_limit_flag && (txq->peer) &&
	    (txq->peer->tx_limit_flag)) {
		if (txq->peer->tx_limit < frames)
			txq->peer->tx_limit = 0;
		else
			txq->peer->tx_limit -= frames;

		TX_SCHED_DEBUG_PRINT_ALWAYS(
				"Peer ID %d in limit, deque %d frms",
				txq->peer->peer_ids[0], frames);
	} else if (txq->peer) {
		TX_SCHED_DEBUG_PRINT("Download peer_id %d, num_frames %d",
				     txq->peer->peer_ids[0], frames);
	}
	qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
}
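
/*
 * These two helpers are intended to bracket a scheduler dequeue (usage
 * sketch, illustrative only; head/credit/bytes as in ol_tx_dequeue()):
 *
 *     u_int16_t tx_limit_flag = 0;
 *     u_int16_t frames;
 *
 *     max_frames = ol_tx_bad_peer_dequeue_check(txq, max_frames,
 *                                               &tx_limit_flag);
 *     frames = ol_tx_dequeue(pdev, txq, &head, max_frames,
 *                            &credit, &bytes);
 *     ol_tx_bad_peer_update_tx_limit(pdev, txq, frames, tx_limit_flag);
 */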

void
ol_txrx_bad_peer_txctl_set_setting(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				   int enable, int period, int txq_limit)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);

	if (enable)
		pdev->tx_peer_bal.enabled = ol_tx_peer_bal_enable;
	else
		pdev->tx_peer_bal.enabled = ol_tx_peer_bal_disable;

	/* Set the current settings */
	pdev->tx_peer_bal.peer_bal_period_ms = period;
	pdev->tx_peer_bal.peer_bal_txq_limit = txq_limit;
}

void
ol_txrx_bad_peer_txctl_update_threshold(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id, int level,
					int tput_thresh, int tx_limit)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);

	/* Set the current thresholds */
	pdev->tx_peer_bal.ctl_thresh[level].tput_thresh = tput_thresh;
	pdev->tx_peer_bal.ctl_thresh[level].tx_limit = tx_limit;
}

/**
 * ol_tx_pdev_peer_bal_timer() - timer function
 * @context: context of timer function
 *
 * Return: None
 */
static void
ol_tx_pdev_peer_bal_timer(void *context)
{
	int i;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;

	qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);

	for (i = 0; i < pdev->tx_peer_bal.peer_num; i++) {
		if (pdev->tx_peer_bal.limit_list[i].limit_flag) {
			u_int16_t peer_id =
				pdev->tx_peer_bal.limit_list[i].peer_id;
			u_int16_t tx_limit =
				pdev->tx_peer_bal.limit_list[i].limit;

			struct ol_txrx_peer_t *peer = NULL;

			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
			TX_SCHED_DEBUG_PRINT(
				"peer_id %d peer = %pK tx limit %d",
				peer_id, peer, tx_limit);

			/*
			 * The peer limit may still be non-zero here, but
			 * that scenario needs no special handling.
			 */
			if (peer) {
				peer->tx_limit = tx_limit;
			} else {
				ol_txrx_peer_bal_remove_limit_peer(pdev,
								   peer_id);
				TX_SCHED_DEBUG_PRINT_ALWAYS(
					"No such peer, peer id = %d",
					peer_id);
			}
		}
	}

	qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);

	if (pdev->tx_peer_bal.peer_num) {
		ol_tx_sched(pdev);
		qdf_timer_start(&pdev->tx_peer_bal.peer_bal_timer,
					pdev->tx_peer_bal.peer_bal_period_ms);
	}
}

void
ol_txrx_set_txq_peer(
	struct ol_tx_frms_queue_t *txq,
	struct ol_txrx_peer_t *peer)
{
	if (txq)
		txq->peer = peer;
}

void ol_tx_badpeer_flow_cl_init(struct ol_txrx_pdev_t *pdev)
{
	u_int32_t timer_period;

	qdf_spinlock_create(&pdev->tx_peer_bal.mutex);
	pdev->tx_peer_bal.peer_num = 0;
	pdev->tx_peer_bal.peer_bal_timer_state
		= ol_tx_peer_bal_timer_inactive;

	timer_period = 2000;
	pdev->tx_peer_bal.peer_bal_period_ms = timer_period;

	qdf_timer_init(
			pdev->osdev,
			&pdev->tx_peer_bal.peer_bal_timer,
			ol_tx_pdev_peer_bal_timer,
			pdev, QDF_TIMER_TYPE_SW);
}

void ol_tx_badpeer_flow_cl_deinit(struct ol_txrx_pdev_t *pdev)
{
	qdf_timer_stop(&pdev->tx_peer_bal.peer_bal_timer);
	pdev->tx_peer_bal.peer_bal_timer_state =
					ol_tx_peer_bal_timer_inactive;
	qdf_timer_free(&pdev->tx_peer_bal.peer_bal_timer);
	qdf_spinlock_destroy(&pdev->tx_peer_bal.mutex);
}

void
ol_txrx_peer_link_status_handler(
	ol_txrx_pdev_handle pdev,
	u_int16_t peer_num,
	struct rate_report_t *peer_link_status)
{
	u_int16_t i = 0;
	struct ol_txrx_peer_t *peer = NULL;

	if (!pdev) {
		TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler");
		return;
	}

	if (!peer_link_status) {
		TX_SCHED_DEBUG_PRINT_ALWAYS(
			"Error: NULL link report message. peer num %d",
			peer_num);
		return;
	}

	/* Check if bad peer tx flow CL is enabled */
	if (pdev->tx_peer_bal.enabled != ol_tx_peer_bal_enable) {
		TX_SCHED_DEBUG_PRINT_ALWAYS(
			"Bad peer tx flow CL is not enabled, ignore it");
		return;
	}

	/* Check peer_num is reasonable */
	if (peer_num > MAX_NO_PEERS_IN_LIMIT) {
		TX_SCHED_DEBUG_PRINT_ALWAYS("Bad peer_num %d", peer_num);
		return;
	}

	TX_SCHED_DEBUG_PRINT_ALWAYS("peer_num %d", peer_num);

	for (i = 0; i < peer_num; i++) {
		u_int16_t peer_limit, peer_id;
		u_int16_t pause_flag, unpause_flag;
		u_int32_t peer_phy, peer_tput;

		peer_id = peer_link_status->id;
		peer_phy = peer_link_status->phy;
		peer_tput = peer_link_status->rate;

		TX_SCHED_DEBUG_PRINT("peer id %d tput %d phy %d",
				     peer_id, peer_tput, peer_phy);

		/* Sanity check for the PHY mode value */
		if (peer_phy > TXRX_IEEE11_AC) {
			TX_SCHED_DEBUG_PRINT_ALWAYS(
				"Illegal PHY value %d for peer_id %d",
				peer_link_status->phy, peer_id);
			continue;
		}
1053*5113495bSYour Name 		pause_flag   = false;
1054*5113495bSYour Name 		unpause_flag = false;
1055*5113495bSYour Name 		peer_limit   = 0;
1056*5113495bSYour Name 
1057*5113495bSYour Name 		/* From now on, PHY, PER info should be all fine */
1058*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->tx_peer_bal.mutex);
1059*5113495bSYour Name 
1060*5113495bSYour Name 		/* Update link status analysis for each peer */
1061*5113495bSYour Name 		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
1062*5113495bSYour Name 		if (peer) {
1063*5113495bSYour Name 			u_int32_t thresh, limit, phy;
1064*5113495bSYour Name 
1065*5113495bSYour Name 			phy = peer_link_status->phy;
1066*5113495bSYour Name 			thresh = pdev->tx_peer_bal.ctl_thresh[phy].tput_thresh;
1067*5113495bSYour Name 			limit = pdev->tx_peer_bal.ctl_thresh[phy].tx_limit;
1068*5113495bSYour Name 
1069*5113495bSYour Name 			if (((peer->tx_pause_flag) || (peer->tx_limit_flag)) &&
1070*5113495bSYour Name 			    (peer_tput) && (peer_tput < thresh))
1071*5113495bSYour Name 				peer_limit = limit;
1072*5113495bSYour Name 
1073*5113495bSYour Name 			if (peer_limit) {
1074*5113495bSYour Name 				ol_txrx_peer_bal_add_limit_peer(pdev, peer_id,
1075*5113495bSYour Name 								peer_limit);
1076*5113495bSYour Name 			} else if (pdev->tx_peer_bal.peer_num) {
1077*5113495bSYour Name 				TX_SCHED_DEBUG_PRINT(
1078*5113495bSYour Name 					"Check if peer_id %d exit limit",
1079*5113495bSYour Name 					peer_id);
1080*5113495bSYour Name 				ol_txrx_peer_bal_remove_limit_peer(pdev,
1081*5113495bSYour Name 								   peer_id);
1082*5113495bSYour Name 			}
1083*5113495bSYour Name 			if ((peer_tput == 0) &&
1084*5113495bSYour Name 			    (peer->tx_pause_flag == false)) {
1085*5113495bSYour Name 				peer->tx_pause_flag = true;
1086*5113495bSYour Name 				pause_flag = true;
1087*5113495bSYour Name 			} else if (peer->tx_pause_flag) {
1088*5113495bSYour Name 				unpause_flag = true;
1089*5113495bSYour Name 				peer->tx_pause_flag = false;
1090*5113495bSYour Name 			}
1091*5113495bSYour Name 		} else {
1092*5113495bSYour Name 			TX_SCHED_DEBUG_PRINT(
1093*5113495bSYour Name 				"Remove peer_id %d from limit list", peer_id);
1094*5113495bSYour Name 			ol_txrx_peer_bal_remove_limit_peer(pdev, peer_id);
1095*5113495bSYour Name 		}
1096*5113495bSYour Name 
1097*5113495bSYour Name 		peer_link_status++;
1098*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
1099*5113495bSYour Name 		if (pause_flag)
1100*5113495bSYour Name 			ol_txrx_peer_pause_but_no_mgmt_q(peer);
1101*5113495bSYour Name 		else if (unpause_flag)
1102*5113495bSYour Name 			ol_txrx_peer_unpause_but_no_mgmt_q(peer);
1103*5113495bSYour Name 	}
1104*5113495bSYour Name }
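/*
 * Summary sketch of the per-report decision applied in the loop above
 * (illustrative numbers only; the real thresholds come from
 * pdev->tx_peer_bal.ctl_thresh[phy]):
 *
 *	peer_tput == 0 and peer not yet paused  -> pause the peer
 *	any other report for a paused peer      -> unpause the peer
 *	peer already paused/limited and
 *	0 < peer_tput < tput_thresh             -> (re)apply tx_limit
 *
 * E.g. with tput_thresh = 100 and tx_limit = 50, a limited peer
 * reporting peer_tput = 60 stays capped at 50 frames, while a report
 * of 120 removes it from the limit list.
 */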
1105*5113495bSYour Name #endif /* QCA_BAD_PEER_TX_FLOW_CL */
1106*5113495bSYour Name 
1107*5113495bSYour Name /*--- ADDBA triggering functions --------------------------------------------*/
1108*5113495bSYour Name 
1109*5113495bSYour Name 
1110*5113495bSYour Name /*=== debug functions =======================================================*/
1111*5113495bSYour Name 
1112*5113495bSYour Name /*--- queue event log -------------------------------------------------------*/
1113*5113495bSYour Name 
1114*5113495bSYour Name #if defined(DEBUG_HL_LOGGING)
1115*5113495bSYour Name 
1116*5113495bSYour Name #define negative_sign -1
1117*5113495bSYour Name 
1118*5113495bSYour Name /**
1119*5113495bSYour Name  * ol_tx_queue_log_entry_type_info() - get tx queue log entry info
1120*5113495bSYour Name  * @type: log entry type
1121*5113495bSYour Name  * @size: record size (output)
1122*5113495bSYour Name  * @align: record alignment (output)
1123*5113495bSYour Name  * @var_size: whether to read the variable-sized portion of the record
1124*5113495bSYour Name  *
1125*5113495bSYour Name  * Return: None
1126*5113495bSYour Name  */
1127*5113495bSYour Name static void
1128*5113495bSYour Name ol_tx_queue_log_entry_type_info(
1129*5113495bSYour Name 	u_int8_t *type, int *size, int *align, int var_size)
1130*5113495bSYour Name {
1131*5113495bSYour Name 	switch (*type) {
1132*5113495bSYour Name 	case ol_tx_log_entry_type_enqueue:
1133*5113495bSYour Name 	case ol_tx_log_entry_type_dequeue:
1134*5113495bSYour Name 	case ol_tx_log_entry_type_queue_free:
1135*5113495bSYour Name 		*size = sizeof(struct ol_tx_log_queue_add_t);
1136*5113495bSYour Name 		*align = 2;
1137*5113495bSYour Name 		break;
1138*5113495bSYour Name 
1139*5113495bSYour Name 	case ol_tx_log_entry_type_queue_state:
1140*5113495bSYour Name 		*size = offsetof(struct ol_tx_log_queue_state_var_sz_t, data);
1141*5113495bSYour Name 		*align = 4;
1142*5113495bSYour Name 		if (var_size) {
1143*5113495bSYour Name 			/* read the variable-sized record,
1144*5113495bSYour Name 			 * to see how large it is
1145*5113495bSYour Name 			 */
1146*5113495bSYour Name 			int align_pad;
1147*5113495bSYour Name 			struct ol_tx_log_queue_state_var_sz_t *record;
1148*5113495bSYour Name 
1149*5113495bSYour Name 			align_pad =
1150*5113495bSYour Name 			(*align - (uint32_t)(((unsigned long) type) + 1))
1151*5113495bSYour Name 							& (*align - 1);
1152*5113495bSYour Name 			record = (struct ol_tx_log_queue_state_var_sz_t *)
1153*5113495bSYour Name 				(type + 1 + align_pad);
1154*5113495bSYour Name 			*size += record->num_cats_active *
1155*5113495bSYour Name 				(sizeof(u_int32_t) /* bytes */ +
1156*5113495bSYour Name 					sizeof(u_int16_t) /* frms */);
1157*5113495bSYour Name 		}
1158*5113495bSYour Name 		break;
1159*5113495bSYour Name 
1160*5113495bSYour Name 	/*case ol_tx_log_entry_type_drop:*/
1161*5113495bSYour Name 	default:
1162*5113495bSYour Name 		*size = 0;
1163*5113495bSYour Name 		*align = 0;
1164*5113495bSYour Name 	}
1165*5113495bSYour Name }
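/*
 * Worked example of the alignment math used throughout this log (a
 * sketch, not driver code): the record body must start on an
 * *align*-byte boundary, one byte past the type byte.  With align = 4
 * and the type byte at offset 5:
 *
 *	align_pad = (4 - (5 + 1)) & (4 - 1) = (-2) & 3 = 2
 *	body offset = 5 + 1 + 2 = 8, which is 4-byte aligned
 *
 * The "& (align - 1)" trick relies on align being a power of two.
 */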
1166*5113495bSYour Name 
1167*5113495bSYour Name /**
1168*5113495bSYour Name  * ol_tx_queue_log_oldest_update() - advance the oldest log record offset
1169*5113495bSYour Name  * @pdev: pointer to txrx handle
1170*5113495bSYour Name  * @offset: offset the newest record will extend to
1171*5113495bSYour Name  *
1172*5113495bSYour Name  * Return: None
1173*5113495bSYour Name  */
1174*5113495bSYour Name static void
1175*5113495bSYour Name ol_tx_queue_log_oldest_update(struct ol_txrx_pdev_t *pdev, int offset)
1176*5113495bSYour Name {
1177*5113495bSYour Name 	int oldest_record_offset;
1178*5113495bSYour Name 
1179*5113495bSYour Name 	/*
1180*5113495bSYour Name 	 * If the offset of the oldest record is between the current and
1181*5113495bSYour Name 	 * new values of the offset of the newest record, then the oldest
1182*5113495bSYour Name 	 * record has to be dropped from the log to provide room for the
1183*5113495bSYour Name 	 * newest record.
1184*5113495bSYour Name 	 * Advance the offset of the oldest record until it points to a
1185*5113495bSYour Name 	 * record that is beyond the new value of the offset of the newest
1186*5113495bSYour Name 	 * record.
1187*5113495bSYour Name 	 */
1188*5113495bSYour Name 	if (!pdev->txq_log.wrapped)
1189*5113495bSYour Name 		/*
1190*5113495bSYour Name 		 * The log has not even filled up yet - no need to remove
1191*5113495bSYour Name 		 * the oldest record to make room for a new record.
1192*5113495bSYour Name 		 */
1193*5113495bSYour Name 		return;
1194*5113495bSYour Name 
1195*5113495bSYour Name 
1196*5113495bSYour Name 	if (offset > pdev->txq_log.offset) {
1197*5113495bSYour Name 		/*
1198*5113495bSYour Name 		 * not wraparound -
1199*5113495bSYour Name 		 * The oldest record offset may have already wrapped around,
1200*5113495bSYour Name 		 * even if the newest record has not.  In this case, then
1201*5113495bSYour Name 		 * the oldest record offset is fine where it is.
1202*5113495bSYour Name 		 */
1203*5113495bSYour Name 		if (pdev->txq_log.oldest_record_offset == 0)
1204*5113495bSYour Name 			return;
1205*5113495bSYour Name 
1206*5113495bSYour Name 		oldest_record_offset = pdev->txq_log.oldest_record_offset;
1207*5113495bSYour Name 	} else
1208*5113495bSYour Name 		/* wraparound */
1209*5113495bSYour Name 		oldest_record_offset = 0;
1210*5113495bSYour Name 
1211*5113495bSYour Name 
1212*5113495bSYour Name 	while (oldest_record_offset < offset) {
1213*5113495bSYour Name 		int size, align, align_pad;
1214*5113495bSYour Name 		u_int8_t type;
1215*5113495bSYour Name 
1216*5113495bSYour Name 		type = pdev->txq_log.data[oldest_record_offset];
1217*5113495bSYour Name 		if (type == ol_tx_log_entry_type_wrap) {
1218*5113495bSYour Name 			oldest_record_offset = 0;
1219*5113495bSYour Name 			break;
1220*5113495bSYour Name 		}
1221*5113495bSYour Name 		ol_tx_queue_log_entry_type_info(
1222*5113495bSYour Name 				&pdev->txq_log.data[oldest_record_offset],
1223*5113495bSYour Name 				&size, &align, 1);
1224*5113495bSYour Name 		align_pad =
1225*5113495bSYour Name 			(align - ((oldest_record_offset + 1/*type*/)))
1226*5113495bSYour Name 							& (align - 1);
1227*5113495bSYour Name 		/*
1228*5113495bSYour Name 		 * QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1229*5113495bSYour Name 		 * "TXQ LOG old alloc: offset %d, type %d, size %d (%d)\n",
1230*5113495bSYour Name 		 * oldest_record_offset, type, size, size + 1 + align_pad);
1231*5113495bSYour Name 		 */
1232*5113495bSYour Name 		oldest_record_offset += size + 1 + align_pad;
1233*5113495bSYour Name 	}
1234*5113495bSYour Name 	if (oldest_record_offset >= pdev->txq_log.size)
1235*5113495bSYour Name 		oldest_record_offset = 0;
1236*5113495bSYour Name 
1237*5113495bSYour Name 	pdev->txq_log.oldest_record_offset = oldest_record_offset;
1238*5113495bSYour Name }
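/*
 * Sketch with hypothetical offsets: the log has wrapped, the oldest
 * record sits at offset 4, and a new allocation will extend the newest
 * record to offset 24.  The loop above steps over each record (adding
 * size + 1 type byte + align_pad per step) until the running offset
 * reaches 24 or a wrap marker resets it to 0; every record stepped
 * over is dropped from the readable window, since it is about to be
 * overwritten.
 */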
1239*5113495bSYour Name 
1240*5113495bSYour Name /**
1241*5113495bSYour Name  * ol_tx_queue_log_alloc() - allocate space for a tx queue log record
1242*5113495bSYour Name  * @pdev: physical device object
1243*5113495bSYour Name  * @type: ol_tx_log_entry_type
1244*5113495bSYour Name  * @extra_bytes: extra bytes to append to the fixed-size record
1245*5113495bSYour Name  *
1247*5113495bSYour Name  * Return: pointer to the log record body, or NULL if the log is full
1248*5113495bSYour Name  */
1249*5113495bSYour Name static void *
1250*5113495bSYour Name ol_tx_queue_log_alloc(
1251*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1252*5113495bSYour Name 	u_int8_t type /* ol_tx_log_entry_type */,
1253*5113495bSYour Name 	int extra_bytes)
1254*5113495bSYour Name {
1255*5113495bSYour Name 	int size, align, align_pad;
1256*5113495bSYour Name 	int offset;
1257*5113495bSYour Name 
1258*5113495bSYour Name 	ol_tx_queue_log_entry_type_info(&type, &size, &align, 0);
1259*5113495bSYour Name 	size += extra_bytes;
1260*5113495bSYour Name 
1261*5113495bSYour Name 	offset = pdev->txq_log.offset;
1262*5113495bSYour Name 	align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1263*5113495bSYour Name 
1264*5113495bSYour Name 	if (pdev->txq_log.size - offset >= size + 1 + align_pad)
1265*5113495bSYour Name 		/* no need to wrap around */
1266*5113495bSYour Name 		goto alloc_found;
1267*5113495bSYour Name 
1268*5113495bSYour Name 	if (!pdev->txq_log.allow_wrap)
1269*5113495bSYour Name 		return NULL; /* log is full and can't wrap */
1270*5113495bSYour Name 
1271*5113495bSYour Name 	/* handle wrap-around */
1272*5113495bSYour Name 	pdev->txq_log.wrapped = 1;
1273*5113495bSYour Name 	offset = 0;
1274*5113495bSYour Name 	align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1275*5113495bSYour Name 	/* sanity check that the log is large enough to hold this entry */
1276*5113495bSYour Name 	if (pdev->txq_log.size <= size + 1 + align_pad)
1277*5113495bSYour Name 		return NULL;
1278*5113495bSYour Name 
1279*5113495bSYour Name 
1280*5113495bSYour Name alloc_found:
1281*5113495bSYour Name 	ol_tx_queue_log_oldest_update(pdev, offset + size + 1 + align_pad);
1282*5113495bSYour Name 	if (offset == 0)
1283*5113495bSYour Name 		pdev->txq_log.data[pdev->txq_log.offset] =
1284*5113495bSYour Name 						ol_tx_log_entry_type_wrap;
1285*5113495bSYour Name 
1286*5113495bSYour Name 	/*
1287*5113495bSYour Name 	 * QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1288*5113495bSYour Name 	 * "TXQ LOG new alloc: offset %d, type %d, size %d (%d)\n",
1289*5113495bSYour Name 	 * offset, type, size, size + 1 + align_pad);
1290*5113495bSYour Name 	 */
1291*5113495bSYour Name 	pdev->txq_log.data[offset] = type;
1292*5113495bSYour Name 	pdev->txq_log.offset = offset + size + 1 + align_pad;
1293*5113495bSYour Name 	if (pdev->txq_log.offset >= pdev->txq_log.size) {
1294*5113495bSYour Name 		pdev->txq_log.offset = 0;
1295*5113495bSYour Name 		pdev->txq_log.wrapped = 1;
1296*5113495bSYour Name 	}
1297*5113495bSYour Name 	return &pdev->txq_log.data[offset + 1 + align_pad];
1298*5113495bSYour Name }
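/*
 * Typical caller pattern for the allocator above (see
 * ol_tx_queue_log_enqueue() below); pdev->txq_log_spinlock must be
 * held across both the allocation and the writes into the returned
 * record:
 *
 *	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
 *	rec = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_enqueue, 0);
 *	if (rec) {
 *		... fill in the record fields ...
 *	}
 *	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
 */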
1299*5113495bSYour Name 
1300*5113495bSYour Name /**
1301*5113495bSYour Name  * ol_tx_queue_log_record_display() - show one tx queue log record
1302*5113495bSYour Name  * @pdev: pointer to txrx handle
1303*5113495bSYour Name  * @offset: offset of the record within the log
1304*5113495bSYour Name  *
1305*5113495bSYour Name  * Return: record length, negated offset for a wrap marker, or 0 on error
1306*5113495bSYour Name  */
1307*5113495bSYour Name static int
1308*5113495bSYour Name ol_tx_queue_log_record_display(struct ol_txrx_pdev_t *pdev, int offset)
1309*5113495bSYour Name {
1310*5113495bSYour Name 	int size, align, align_pad;
1311*5113495bSYour Name 	u_int8_t type;
1312*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
1313*5113495bSYour Name 
1314*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1315*5113495bSYour Name 	type = pdev->txq_log.data[offset];
1316*5113495bSYour Name 	ol_tx_queue_log_entry_type_info(
1317*5113495bSYour Name 			&pdev->txq_log.data[offset], &size, &align, 1);
1318*5113495bSYour Name 	align_pad = (align - ((offset + 1/*type*/))) & (align - 1);
1319*5113495bSYour Name 
1320*5113495bSYour Name 	switch (type) {
1321*5113495bSYour Name 	case ol_tx_log_entry_type_enqueue:
1322*5113495bSYour Name 	{
1323*5113495bSYour Name 		struct ol_tx_log_queue_add_t record;
1324*5113495bSYour Name 
1325*5113495bSYour Name 		qdf_mem_copy(&record,
1326*5113495bSYour Name 			     &pdev->txq_log.data[offset + 1 + align_pad],
1327*5113495bSYour Name 			     sizeof(struct ol_tx_log_queue_add_t));
1328*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1329*5113495bSYour Name 
1330*5113495bSYour Name 		if (record.peer_id != 0xffff) {
1331*5113495bSYour Name 			peer = ol_txrx_peer_find_by_id(pdev,
1332*5113495bSYour Name 						       record.peer_id);
1333*5113495bSYour Name 			if (peer)
1334*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1335*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1336*5113495bSYour Name 					  "Q: %6d  %5d  %3d  %4d ("QDF_MAC_ADDR_FMT")",
1337*5113495bSYour Name 					  record.num_frms, record.num_bytes,
1338*5113495bSYour Name 					  record.tid,
1339*5113495bSYour Name 					  record.peer_id,
1340*5113495bSYour Name 					  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1341*5113495bSYour Name 			else
1342*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1343*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1344*5113495bSYour Name 					  "Q: %6d  %5d  %3d  %4d",
1345*5113495bSYour Name 					  record.num_frms, record.num_bytes,
1346*5113495bSYour Name 					  record.tid, record.peer_id);
1347*5113495bSYour Name 		} else {
1348*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1349*5113495bSYour Name 				  QDF_TRACE_LEVEL_INFO,
1350*5113495bSYour Name 				  "Q: %6d  %5d  %3d  from vdev",
1351*5113495bSYour Name 				  record.num_frms, record.num_bytes,
1352*5113495bSYour Name 				  record.tid);
1353*5113495bSYour Name 		}
1354*5113495bSYour Name 		break;
1355*5113495bSYour Name 	}
1356*5113495bSYour Name 	case ol_tx_log_entry_type_dequeue:
1357*5113495bSYour Name 	{
1358*5113495bSYour Name 		struct ol_tx_log_queue_add_t record;
1359*5113495bSYour Name 
1360*5113495bSYour Name 		qdf_mem_copy(&record,
1361*5113495bSYour Name 			     &pdev->txq_log.data[offset + 1 + align_pad],
1362*5113495bSYour Name 			     sizeof(struct ol_tx_log_queue_add_t));
1363*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1364*5113495bSYour Name 
1365*5113495bSYour Name 		if (record.peer_id != 0xffff) {
1366*5113495bSYour Name 			peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
1367*5113495bSYour Name 			if (peer)
1368*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1369*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1370*5113495bSYour Name 					  "DQ: %6d  %5d  %3d  %4d ("QDF_MAC_ADDR_FMT")",
1371*5113495bSYour Name 					  record.num_frms, record.num_bytes,
1372*5113495bSYour Name 					  record.tid,
1373*5113495bSYour Name 					  record.peer_id,
1374*5113495bSYour Name 					  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1375*5113495bSYour Name 			else
1376*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1377*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1378*5113495bSYour Name 					  "DQ: %6d  %5d  %3d  %4d",
1379*5113495bSYour Name 					  record.num_frms, record.num_bytes,
1380*5113495bSYour Name 					  record.tid, record.peer_id);
1381*5113495bSYour Name 		} else {
1382*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1383*5113495bSYour Name 				  QDF_TRACE_LEVEL_INFO,
1384*5113495bSYour Name 				  "DQ: %6d  %5d  %3d  from vdev",
1385*5113495bSYour Name 				  record.num_frms, record.num_bytes,
1386*5113495bSYour Name 				  record.tid);
1387*5113495bSYour Name 		}
1388*5113495bSYour Name 		break;
1389*5113495bSYour Name 	}
1390*5113495bSYour Name 	case ol_tx_log_entry_type_queue_free:
1391*5113495bSYour Name 	{
1392*5113495bSYour Name 		struct ol_tx_log_queue_add_t record;
1393*5113495bSYour Name 
1394*5113495bSYour Name 		qdf_mem_copy(&record,
1395*5113495bSYour Name 			     &pdev->txq_log.data[offset + 1 + align_pad],
1396*5113495bSYour Name 			     sizeof(struct ol_tx_log_queue_add_t));
1397*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1398*5113495bSYour Name 
1399*5113495bSYour Name 		if (record.peer_id != 0xffff) {
1400*5113495bSYour Name 			peer = ol_txrx_peer_find_by_id(pdev, record.peer_id);
1401*5113495bSYour Name 			if (peer)
1402*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1403*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1404*5113495bSYour Name 					  "F: %6d  %5d  %3d  %4d ("QDF_MAC_ADDR_FMT")",
1405*5113495bSYour Name 					  record.num_frms, record.num_bytes,
1406*5113495bSYour Name 					  record.tid,
1407*5113495bSYour Name 					  record.peer_id,
1408*5113495bSYour Name 					  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1409*5113495bSYour Name 			else
1410*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1411*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1412*5113495bSYour Name 					  "F: %6d  %5d  %3d  %4d",
1413*5113495bSYour Name 					  record.num_frms, record.num_bytes,
1414*5113495bSYour Name 					  record.tid, record.peer_id);
1415*5113495bSYour Name 		} else {
1416*5113495bSYour Name 			/* shouldn't happen */
1417*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1418*5113495bSYour Name 				  QDF_TRACE_LEVEL_INFO,
1419*5113495bSYour Name 				  "Unexpected vdev queue removal\n");
1420*5113495bSYour Name 		}
1421*5113495bSYour Name 		break;
1422*5113495bSYour Name 	}
1423*5113495bSYour Name 
1424*5113495bSYour Name 	case ol_tx_log_entry_type_queue_state:
1425*5113495bSYour Name 	{
1426*5113495bSYour Name 		int i, j;
1427*5113495bSYour Name 		u_int32_t active_bitmap;
1428*5113495bSYour Name 		struct ol_tx_log_queue_state_var_sz_t record;
1429*5113495bSYour Name 		u_int8_t *data;
1430*5113495bSYour Name 
1431*5113495bSYour Name 		qdf_mem_copy(&record,
1432*5113495bSYour Name 			     &pdev->txq_log.data[offset + 1 + align_pad],
1433*5113495bSYour Name 			     sizeof(struct ol_tx_log_queue_state_var_sz_t));
1434*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1435*5113495bSYour Name 
1436*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1437*5113495bSYour Name 			  "S: bitmap = %#x",
1438*5113495bSYour Name 			  record.active_bitmap);
1439*5113495bSYour Name 		data = &record.data[0];
1440*5113495bSYour Name 		j = 0;
1441*5113495bSYour Name 		i = 0;
1442*5113495bSYour Name 		active_bitmap = record.active_bitmap;
1443*5113495bSYour Name 		while (active_bitmap) {
1444*5113495bSYour Name 			if (active_bitmap & 0x1) {
1445*5113495bSYour Name 				u_int16_t frms;
1446*5113495bSYour Name 				u_int32_t bytes;
1447*5113495bSYour Name 
1448*5113495bSYour Name 				frms = data[0] | (data[1] << 8);
1449*5113495bSYour Name 				bytes = (data[2] <<  0) | (data[3] <<  8) |
1450*5113495bSYour Name 					(data[4] << 16) | (data[5] << 24);
1451*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1452*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1453*5113495bSYour Name 					  "cat %2d: %6d  %5d",
1454*5113495bSYour Name 					  i, frms, bytes);
1455*5113495bSYour Name 				data += 6;
1456*5113495bSYour Name 				j++;
1457*5113495bSYour Name 			}
1458*5113495bSYour Name 			i++;
1459*5113495bSYour Name 			active_bitmap >>= 1;
1460*5113495bSYour Name 		}
1461*5113495bSYour Name 		break;
1462*5113495bSYour Name 	}
1463*5113495bSYour Name 
1464*5113495bSYour Name 	/*case ol_tx_log_entry_type_drop:*/
1465*5113495bSYour Name 
1466*5113495bSYour Name 	case ol_tx_log_entry_type_wrap:
1467*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1468*5113495bSYour Name 		return negative_sign * offset; /* go back to the top */
1469*5113495bSYour Name 
1470*5113495bSYour Name 	default:
1471*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1472*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1473*5113495bSYour Name 			  "*** invalid tx log entry type (%d)\n", type);
1474*5113495bSYour Name 		return 0; /* error */
1475*5113495bSYour Name 	}
1476*5113495bSYour Name 
1477*5113495bSYour Name 	return size + 1 + align_pad;
1478*5113495bSYour Name }
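/*
 * Byte layout of the queue_state payload decoded above, shown for a
 * hypothetical record with active_bitmap = 0x5 (categories 0 and 2
 * active).  Each active category contributes a 6-byte tuple: a
 * little-endian u16 frame count followed by a little-endian u32 byte
 * count:
 *
 *	data[0..1] = frms, cat 0	data[2..5]  = bytes, cat 0
 *	data[6..7] = frms, cat 2	data[8..11] = bytes, cat 2
 */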
1479*5113495bSYour Name 
1480*5113495bSYour Name /**
1481*5113495bSYour Name  * ol_tx_queue_log_display() - show tx queue log
1482*5113495bSYour Name  * @pdev: pointer to txrx handle
1483*5113495bSYour Name  *
1484*5113495bSYour Name  * Return: None
1485*5113495bSYour Name  */
1486*5113495bSYour Name void
1487*5113495bSYour Name ol_tx_queue_log_display(struct ol_txrx_pdev_t *pdev)
1488*5113495bSYour Name {
1489*5113495bSYour Name 	int offset;
1490*5113495bSYour Name 	int unwrap;
1491*5113495bSYour Name 
1492*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1493*5113495bSYour Name 	offset = pdev->txq_log.oldest_record_offset;
1494*5113495bSYour Name 	unwrap = pdev->txq_log.wrapped;
1495*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1496*5113495bSYour Name 	/*
1497*5113495bSYour Name 	 * In theory, this should use mutex to guard against the offset
1498*5113495bSYour Name 	 * being changed while in use, but since this is just for debugging,
1499*5113495bSYour Name 	 * don't bother.
1500*5113495bSYour Name 	 */
1501*5113495bSYour Name 	txrx_nofl_info("Current target credit: %d",
1502*5113495bSYour Name 		       qdf_atomic_read(&pdev->target_tx_credit));
1503*5113495bSYour Name 	txrx_nofl_info("Tx queue log:");
1504*5113495bSYour Name 	txrx_nofl_info(": Frames  Bytes  TID  PEER");
1505*5113495bSYour Name 
1506*5113495bSYour Name 	while (unwrap || offset != pdev->txq_log.offset) {
1507*5113495bSYour Name 		int delta = ol_tx_queue_log_record_display(pdev, offset);
1508*5113495bSYour Name 
1509*5113495bSYour Name 		if (delta == 0)
1510*5113495bSYour Name 			return; /* error */
1511*5113495bSYour Name 
1512*5113495bSYour Name 		if (delta < 0)
1513*5113495bSYour Name 			unwrap = 0;
1514*5113495bSYour Name 
1515*5113495bSYour Name 		offset += delta;
1516*5113495bSYour Name 	}
1517*5113495bSYour Name }
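/*
 * Note on the walk above: the delta returned by
 * ol_tx_queue_log_record_display() is positive to advance past one
 * record, negative on a wrap marker (offset + delta lands back at 0
 * and the unwrap flag is cleared), and 0 to abort on a corrupt entry.
 */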
1518*5113495bSYour Name 
1519*5113495bSYour Name void
1520*5113495bSYour Name ol_tx_queue_log_enqueue(
1521*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1522*5113495bSYour Name 	struct ol_txrx_msdu_info_t *msdu_info,
1523*5113495bSYour Name 	int frms, int bytes)
1524*5113495bSYour Name {
1525*5113495bSYour Name 	int tid;
1526*5113495bSYour Name 	u_int16_t peer_id = msdu_info->htt.info.peer_id;
1527*5113495bSYour Name 	struct ol_tx_log_queue_add_t *log_elem;
1528*5113495bSYour Name 
1529*5113495bSYour Name 	tid = msdu_info->htt.info.ext_tid;
1530*5113495bSYour Name 
1531*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1532*5113495bSYour Name 	log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_enqueue, 0);
1533*5113495bSYour Name 	if (!log_elem) {
1534*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1535*5113495bSYour Name 		return;
1536*5113495bSYour Name 	}
1537*5113495bSYour Name 
1538*5113495bSYour Name 	log_elem->num_frms = frms;
1539*5113495bSYour Name 	log_elem->num_bytes = bytes;
1540*5113495bSYour Name 	log_elem->peer_id = peer_id;
1541*5113495bSYour Name 	log_elem->tid = tid;
1542*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1543*5113495bSYour Name }
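/*
 * Hypothetical call site for the enqueue logger (a sketch; the actual
 * callers live in the HL tx enqueue path):
 *
 *	ol_tx_queue_log_enqueue(pdev, &tx_msdu_info, 1,
 *				qdf_nbuf_len(msdu));
 *
 * This logs one frame of qdf_nbuf_len(msdu) bytes against the peer id
 * and TID carried in tx_msdu_info.htt.info.
 */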
1544*5113495bSYour Name 
1545*5113495bSYour Name void
1546*5113495bSYour Name ol_tx_queue_log_dequeue(
1547*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1548*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
1549*5113495bSYour Name 	int frms, int bytes)
1550*5113495bSYour Name {
1551*5113495bSYour Name 	int ext_tid;
1552*5113495bSYour Name 	u_int16_t peer_id;
1553*5113495bSYour Name 	struct ol_tx_log_queue_add_t *log_elem;
1554*5113495bSYour Name 
1555*5113495bSYour Name 	ext_tid = txq->ext_tid;
1556*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1557*5113495bSYour Name 	log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_dequeue, 0);
1558*5113495bSYour Name 	if (!log_elem) {
1559*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1560*5113495bSYour Name 		return;
1561*5113495bSYour Name 	}
1562*5113495bSYour Name 
1563*5113495bSYour Name 	if (ext_tid < OL_TX_NUM_TIDS) {
1564*5113495bSYour Name 		struct ol_txrx_peer_t *peer;
1565*5113495bSYour Name 		struct ol_tx_frms_queue_t *txq_base;
1566*5113495bSYour Name 
1567*5113495bSYour Name 		txq_base = txq - ext_tid;
1568*5113495bSYour Name 		peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
1569*5113495bSYour Name 		peer_id = peer->peer_ids[0];
1570*5113495bSYour Name 	} else {
1571*5113495bSYour Name 		peer_id = ~0;
1572*5113495bSYour Name 	}
1573*5113495bSYour Name 
1574*5113495bSYour Name 	log_elem->num_frms = frms;
1575*5113495bSYour Name 	log_elem->num_bytes = bytes;
1576*5113495bSYour Name 	log_elem->peer_id = peer_id;
1577*5113495bSYour Name 	log_elem->tid = ext_tid;
1578*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1579*5113495bSYour Name }
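/*
 * Note on the peer recovery above: a peer's per-TID queues are laid
 * out as a contiguous array, so for a peer txq, subtracting ext_tid
 * from the queue pointer yields &peer->txqs[0], from which
 * container_of() recovers the owning peer:
 *
 *	txq_base = txq - ext_tid;	(points at peer->txqs[0])
 *	peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
 */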
1580*5113495bSYour Name 
1581*5113495bSYour Name void
1582*5113495bSYour Name ol_tx_queue_log_free(
1583*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1584*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
1585*5113495bSYour Name 	int tid, int frms, int bytes, bool is_peer_txq)
1586*5113495bSYour Name {
1587*5113495bSYour Name 	u_int16_t peer_id;
1588*5113495bSYour Name 	struct ol_tx_log_queue_add_t *log_elem;
1589*5113495bSYour Name 
1590*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1591*5113495bSYour Name 	log_elem = ol_tx_queue_log_alloc(pdev, ol_tx_log_entry_type_queue_free,
1592*5113495bSYour Name 									0);
1593*5113495bSYour Name 	if (!log_elem) {
1594*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1595*5113495bSYour Name 		return;
1596*5113495bSYour Name 	}
1597*5113495bSYour Name 
1598*5113495bSYour Name 	if ((tid < OL_TX_NUM_TIDS) && is_peer_txq) {
1599*5113495bSYour Name 		struct ol_txrx_peer_t *peer;
1600*5113495bSYour Name 		struct ol_tx_frms_queue_t *txq_base;
1601*5113495bSYour Name 
1602*5113495bSYour Name 		txq_base = txq - tid;
1603*5113495bSYour Name 		peer = container_of(txq_base, struct ol_txrx_peer_t, txqs[0]);
1604*5113495bSYour Name 		peer_id = peer->peer_ids[0];
1605*5113495bSYour Name 	} else {
1606*5113495bSYour Name 		peer_id = ~0;
1607*5113495bSYour Name 	}
1608*5113495bSYour Name 
1609*5113495bSYour Name 	log_elem->num_frms = frms;
1610*5113495bSYour Name 	log_elem->num_bytes = bytes;
1611*5113495bSYour Name 	log_elem->peer_id = peer_id;
1612*5113495bSYour Name 	log_elem->tid = tid;
1613*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1614*5113495bSYour Name }
1615*5113495bSYour Name 
1616*5113495bSYour Name void
1617*5113495bSYour Name ol_tx_queue_log_sched(
1618*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1619*5113495bSYour Name 	int credit,
1620*5113495bSYour Name 	int *num_cats,
1621*5113495bSYour Name 	u_int32_t **active_bitmap,
1622*5113495bSYour Name 	u_int8_t  **data)
1623*5113495bSYour Name {
1624*5113495bSYour Name 	int data_size;
1625*5113495bSYour Name 	struct ol_tx_log_queue_state_var_sz_t *log_elem;
1626*5113495bSYour Name 
1627*5113495bSYour Name 	data_size = sizeof(u_int32_t) /* bytes */ +
1628*5113495bSYour Name 				sizeof(u_int16_t) /* frms */;
1629*5113495bSYour Name 	data_size *= *num_cats;
1630*5113495bSYour Name 
1631*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1632*5113495bSYour Name 	log_elem = ol_tx_queue_log_alloc(
1633*5113495bSYour Name 			pdev, ol_tx_log_entry_type_queue_state, data_size);
1634*5113495bSYour Name 	if (!log_elem) {
1635*5113495bSYour Name 		*num_cats = 0;
1636*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1637*5113495bSYour Name 		return;
1638*5113495bSYour Name 	}
1639*5113495bSYour Name 	log_elem->num_cats_active = *num_cats;
1640*5113495bSYour Name 	log_elem->active_bitmap = 0;
1641*5113495bSYour Name 	log_elem->credit = credit;
1642*5113495bSYour Name 
1643*5113495bSYour Name 	*active_bitmap = &log_elem->active_bitmap;
1644*5113495bSYour Name 	*data = &log_elem->data[0];
1645*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1646*5113495bSYour Name }
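/*
 * Sketch of the expected writer side (an assumption based on the
 * record layout decoded in ol_tx_queue_log_record_display()): for each
 * active category the scheduler sets the matching bit in
 * *active_bitmap and appends a little-endian u16 frame count followed
 * by a u32 byte count (6 bytes per category) at *data.
 */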
1647*5113495bSYour Name 
1648*5113495bSYour Name /**
1649*5113495bSYour Name  * ol_tx_queue_log_clear() - clear tx queue log
1650*5113495bSYour Name  * @pdev: pointer to txrx handle
1651*5113495bSYour Name  *
1652*5113495bSYour Name  * Return: None
1653*5113495bSYour Name  */
1654*5113495bSYour Name void
1655*5113495bSYour Name ol_tx_queue_log_clear(struct ol_txrx_pdev_t *pdev)
1656*5113495bSYour Name {
1657*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
1658*5113495bSYour Name 	qdf_mem_zero(&pdev->txq_log, sizeof(pdev->txq_log));
1659*5113495bSYour Name 	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
1660*5113495bSYour Name 	pdev->txq_log.oldest_record_offset = 0;
1661*5113495bSYour Name 	pdev->txq_log.offset = 0;
1662*5113495bSYour Name 	pdev->txq_log.allow_wrap = 1;
1663*5113495bSYour Name 	pdev->txq_log.wrapped = 0;
1664*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->txq_log_spinlock);
1665*5113495bSYour Name }
1666*5113495bSYour Name #endif /* defined(DEBUG_HL_LOGGING) */
1667*5113495bSYour Name 
1668*5113495bSYour Name /*--- queue state printouts -------------------------------------------------*/
1669*5113495bSYour Name 
1670*5113495bSYour Name #if TXRX_DEBUG_LEVEL > 5
1671*5113495bSYour Name 
1672*5113495bSYour Name /**
1673*5113495bSYour Name  * ol_tx_queue_display() - show tx queue info
1674*5113495bSYour Name  * @txq: pointer to txq frames
1675*5113495bSYour Name  * @indent: indent
1676*5113495bSYour Name  *
1677*5113495bSYour Name  * Return: None
1678*5113495bSYour Name  */
1679*5113495bSYour Name static void
1680*5113495bSYour Name ol_tx_queue_display(struct ol_tx_frms_queue_t *txq, int indent)
1681*5113495bSYour Name {
1682*5113495bSYour Name 	char *state;
1683*5113495bSYour Name 
1684*5113495bSYour Name 	state = (txq->flag == ol_tx_queue_active) ? "active" : "paused";
1685*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1686*5113495bSYour Name 		  "%*stxq %pK (%s): %d frms, %d bytes\n",
1687*5113495bSYour Name 		  indent, " ", txq, state, txq->frms, txq->bytes);
1688*5113495bSYour Name }
1689*5113495bSYour Name 
1690*5113495bSYour Name void
1691*5113495bSYour Name ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
1692*5113495bSYour Name {
1693*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
1694*5113495bSYour Name 
1695*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1696*5113495bSYour Name 		  "pdev %pK tx queues:\n", pdev);
1697*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1698*5113495bSYour Name 		struct ol_txrx_peer_t *peer;
1699*5113495bSYour Name 		int i;
1700*5113495bSYour Name 
1701*5113495bSYour Name 		for (i = 0; i < QDF_ARRAY_SIZE(vdev->txqs); i++) {
1702*5113495bSYour Name 			if (vdev->txqs[i].frms == 0)
1703*5113495bSYour Name 				continue;
1704*5113495bSYour Name 
1705*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
1706*5113495bSYour Name 				  "vdev %d (%pK), txq %d\n", vdev->vdev_id,
1707*5113495bSYour Name 				  vdev, i);
1708*5113495bSYour Name 			ol_tx_queue_display(&vdev->txqs[i], 4);
1709*5113495bSYour Name 		}
1710*5113495bSYour Name 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1711*5113495bSYour Name 			for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
1712*5113495bSYour Name 				if (peer->txqs[i].frms == 0)
1713*5113495bSYour Name 					continue;
1714*5113495bSYour Name 
1715*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1716*5113495bSYour Name 					  QDF_TRACE_LEVEL_INFO_LOW,
1717*5113495bSYour Name 					  "peer %d (%pK), txq %d\n",
1718*5113495bSYour Name 					  peer->peer_ids[0], vdev, i);
1719*5113495bSYour Name 				ol_tx_queue_display(&peer->txqs[i], 6);
1720*5113495bSYour Name 			}
1721*5113495bSYour Name 		}
1722*5113495bSYour Name 	}
1723*5113495bSYour Name }
1724*5113495bSYour Name #endif
1725*5113495bSYour Name 
1726*5113495bSYour Name #endif /* defined(CONFIG_HL_SUPPORT) */
1727*5113495bSYour Name 
1728*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT)
1729*5113495bSYour Name 
1730*5113495bSYour Name /**
1731*5113495bSYour Name  * ol_txrx_pdev_pause() - pause network queues for each vdev
1732*5113495bSYour Name  * @pdev: pdev handle
1733*5113495bSYour Name  * @reason: reason
1734*5113495bSYour Name  *
1735*5113495bSYour Name  * Return: none
1736*5113495bSYour Name  */
1737*5113495bSYour Name void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1738*5113495bSYour Name {
1739*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1740*5113495bSYour Name 
1741*5113495bSYour Name 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1742*5113495bSYour Name 		cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC),
1743*5113495bSYour Name 				  vdev->vdev_id, reason, 0);
1744*5113495bSYour Name 	}
1745*5113495bSYour Name 
1746*5113495bSYour Name }
1747*5113495bSYour Name 
1748*5113495bSYour Name /**
1749*5113495bSYour Name  * ol_txrx_pdev_unpause() - unpause network queues for each vdev
1750*5113495bSYour Name  * @pdev: pdev handle
1751*5113495bSYour Name  * @reason: reason
1752*5113495bSYour Name  *
1753*5113495bSYour Name  * Return: none
1754*5113495bSYour Name  */
1755*5113495bSYour Name void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
1756*5113495bSYour Name {
1757*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
1758*5113495bSYour Name 
1759*5113495bSYour Name 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
1760*5113495bSYour Name 		cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
1761*5113495bSYour Name 				    vdev->vdev_id, reason, 0);
1762*5113495bSYour Name 	}
1763*5113495bSYour Name 
1764*5113495bSYour Name }
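/*
 * Hypothetical usage (a sketch; OL_TXQ_PAUSE_REASON_FW stands in for
 * one of the pause-reason flags defined elsewhere in the driver):
 * pause and unpause calls for the same reason must be balanced:
 *
 *	ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_FW);
 *	...
 *	ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_FW);
 */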
1765*5113495bSYour Name #endif
1766*5113495bSYour Name 
1767*5113495bSYour Name #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
1768*5113495bSYour Name 
1769*5113495bSYour Name /**
1770*5113495bSYour Name  * ol_tx_vdev_has_tx_queue_group() - check whether a vdev belongs to a txq group
1771*5113495bSYour Name  * @group: pointer to tx queue group
1772*5113495bSYour Name  * @vdev_id: vdev id
1773*5113495bSYour Name  *
1774*5113495bSYour Name  * Return: true if the vdev is a member of the txq group
1775*5113495bSYour Name  */
1776*5113495bSYour Name static bool
1777*5113495bSYour Name ol_tx_vdev_has_tx_queue_group(
1778*5113495bSYour Name 	struct ol_tx_queue_group_t *group,
1779*5113495bSYour Name 	u_int8_t vdev_id)
1780*5113495bSYour Name {
1781*5113495bSYour Name 	u_int16_t vdev_bitmap;
1782*5113495bSYour Name 
1783*5113495bSYour Name 	vdev_bitmap = OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
1784*5113495bSYour Name 	if (OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_bitmap, vdev_id))
1785*5113495bSYour Name 		return true;
1786*5113495bSYour Name 
1787*5113495bSYour Name 	return false;
1788*5113495bSYour Name }
1789*5113495bSYour Name 
1790*5113495bSYour Name /**
1791*5113495bSYour Name  * ol_tx_ac_has_tx_queue_group() - check whether an ac belongs to a txq group
1792*5113495bSYour Name  * @group: pointer to tx queue group
1793*5113495bSYour Name  * @ac: access category
1794*5113495bSYour Name  *
1795*5113495bSYour Name  * Return: true if the access category is a member of the txq group
1796*5113495bSYour Name  */
1797*5113495bSYour Name static bool
1798*5113495bSYour Name ol_tx_ac_has_tx_queue_group(
1799*5113495bSYour Name 	struct ol_tx_queue_group_t *group,
1800*5113495bSYour Name 	u_int8_t ac)
1801*5113495bSYour Name {
1802*5113495bSYour Name 	u_int16_t ac_bitmap;
1803*5113495bSYour Name 
1804*5113495bSYour Name 	ac_bitmap = OL_TXQ_GROUP_AC_MASK_GET(group->membership);
1805*5113495bSYour Name 	if (OL_TXQ_GROUP_AC_BIT_MASK_GET(ac_bitmap, ac))
1806*5113495bSYour Name 		return true;
1807*5113495bSYour Name 
1808*5113495bSYour Name 	return false;
1809*5113495bSYour Name }
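/*
 * The two membership tests above are meant to be combined, as
 * ol_tx_set_peer_group_ptr() does below: a peer TID queue is attached
 * to a group only if both its vdev and its WMM access category are
 * members:
 *
 *	if (ol_tx_vdev_has_tx_queue_group(group, vdev_id) &&
 *	    ol_tx_ac_has_tx_queue_group(group, TXRX_TID_TO_WMM_AC(tid)))
 *		... attach the txq to this group ...
 */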
1810*5113495bSYour Name 
1811*5113495bSYour Name #ifdef FEATURE_HL_DBS_GROUP_CREDIT_SHARING
1812*5113495bSYour Name static inline struct ol_tx_queue_group_t *
1813*5113495bSYour Name ol_tx_txq_find_other_group(struct ol_txrx_pdev_t *pdev,
1814*5113495bSYour Name 			   struct ol_tx_queue_group_t *txq_grp)
1815*5113495bSYour Name {
1816*5113495bSYour Name 	int i;
1817*5113495bSYour Name 	struct ol_tx_queue_group_t *other_grp = NULL;
1818*5113495bSYour Name 
1819*5113495bSYour Name 	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
1820*5113495bSYour Name 		if (&pdev->txq_grps[i] != txq_grp) {
1821*5113495bSYour Name 			other_grp = &pdev->txq_grps[i];
1822*5113495bSYour Name 			break;
1823*5113495bSYour Name 		}
1824*5113495bSYour Name 	}
1825*5113495bSYour Name 	return other_grp;
1826*5113495bSYour Name }
1827*5113495bSYour Name 
1828*5113495bSYour Name u32 ol_tx_txq_group_credit_limit(
1829*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1830*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
1831*5113495bSYour Name 	u32 credit)
1832*5113495bSYour Name {
1833*5113495bSYour Name 	struct ol_tx_queue_group_t *txq_grp = txq->group_ptrs[0];
1834*5113495bSYour Name 	struct ol_tx_queue_group_t *other_grp;
1835*5113495bSYour Name 	u32 ask;
1836*5113495bSYour Name 	u32 updated_credit;
1837*5113495bSYour Name 	u32 credit_oth_grp;
1838*5113495bSYour Name 
1839*5113495bSYour Name 	if (qdf_unlikely(!txq_grp))
1840*5113495bSYour Name 		return credit;
1841*5113495bSYour Name 
1842*5113495bSYour Name 	updated_credit = qdf_atomic_read(&txq_grp->credit);
1843*5113495bSYour Name 
1844*5113495bSYour Name 	if (credit <= updated_credit)
1845*5113495bSYour Name 		/* We have enough credits */
1846*5113495bSYour Name 		return credit;
1847*5113495bSYour Name 
1848*5113495bSYour Name 	ask = credit - updated_credit;
1849*5113495bSYour Name 	other_grp = ol_tx_txq_find_other_group(pdev, txq_grp);
1850*5113495bSYour Name 	if (qdf_unlikely(!other_grp))
1851*5113495bSYour Name 		return credit;
1852*5113495bSYour Name 
1853*5113495bSYour Name 	credit_oth_grp = qdf_atomic_read(&other_grp->credit);
1854*5113495bSYour Name 	if (other_grp->frm_count < credit_oth_grp) {
1855*5113495bSYour Name 		u32 spare = credit_oth_grp - other_grp->frm_count;
1856*5113495bSYour Name 
1857*5113495bSYour Name 		if (pdev->limit_lend) {
1858*5113495bSYour Name 			if (spare > pdev->min_reserve)
1859*5113495bSYour Name 				spare -= pdev->min_reserve;
1860*5113495bSYour Name 			else
1861*5113495bSYour Name 				spare = 0;
1862*5113495bSYour Name 		}
1863*5113495bSYour Name 		updated_credit += min(spare, ask);
1864*5113495bSYour Name 	}
1865*5113495bSYour Name 	return updated_credit;
1866*5113495bSYour Name }
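/*
 * Worked example for the borrowing above, with made-up numbers: this
 * group reads updated_credit = 3 but the scheduler wants credit = 10,
 * so ask = 7.  The other group holds 20 credits against frm_count = 8,
 * i.e. spare = 12; with limit_lend set and min_reserve = 5 the
 * lendable spare shrinks to 7, so min(spare, ask) = 7 is borrowed and
 * the function returns 3 + 7 = 10.
 */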
1867*5113495bSYour Name 
1868*5113495bSYour Name u32 ol_tx_txq_update_borrowed_group_credits(struct ol_txrx_pdev_t *pdev,
1869*5113495bSYour Name 					    struct ol_tx_frms_queue_t *txq,
1870*5113495bSYour Name 					    u32 credits_used)
1871*5113495bSYour Name {
1872*5113495bSYour Name 	struct ol_tx_queue_group_t *txq_grp = txq->group_ptrs[0];
1873*5113495bSYour Name 	u32 credits_cur_grp;
1874*5113495bSYour Name 	u32 credits_brwd;
1875*5113495bSYour Name 
1876*5113495bSYour Name 	if (qdf_unlikely(!txq_grp))
1877*5113495bSYour Name 		return credits_used;
1878*5113495bSYour Name 
1879*5113495bSYour Name 	credits_cur_grp = qdf_atomic_read(&txq_grp->credit);
1880*5113495bSYour Name 	if (credits_used > credits_cur_grp) {
1881*5113495bSYour Name 		struct ol_tx_queue_group_t *other_grp =
1882*5113495bSYour Name 			ol_tx_txq_find_other_group(pdev, txq_grp);
1883*5113495bSYour Name 
1884*5113495bSYour Name 		if (qdf_likely(other_grp)) {
1885*5113495bSYour Name 			credits_brwd = credits_used - credits_cur_grp;
1886*5113495bSYour Name 			/*
1887*5113495bSYour Name 			 * All the credits were used from the active txq group.
1888*5113495bSYour Name 			 */
1889*5113495bSYour Name 			credits_used = credits_cur_grp;
1890*5113495bSYour Name 			/* Deduct credits borrowed from other group */
1891*5113495bSYour Name 			ol_txrx_update_group_credit(other_grp, -credits_brwd,
1892*5113495bSYour Name 						    0);
1893*5113495bSYour Name 		}
1894*5113495bSYour Name 	}
1895*5113495bSYour Name 	return credits_used;
1896*5113495bSYour Name }
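/*
 * Continuing the example above: if the scheduler then consumes
 * credits_used = 10 while this group only owned credits_cur_grp = 3,
 * the borrowed credits_brwd = 7 are deducted from the other group via
 * ol_txrx_update_group_credit(other_grp, -7, 0), and 3 is returned so
 * only locally owned credits are charged to this group.
 */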
1897*5113495bSYour Name #else /* FEATURE_HL_DBS_GROUP_CREDIT_SHARING */
1898*5113495bSYour Name u_int32_t ol_tx_txq_group_credit_limit(
1899*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1900*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
1901*5113495bSYour Name 	u_int32_t credit)
1902*5113495bSYour Name {
1903*5113495bSYour Name 	u_int8_t i;
1904*5113495bSYour Name 	int updated_credit = credit;
1905*5113495bSYour Name 
1906*5113495bSYour Name 	/*
1907*5113495bSYour Name 	 * If this tx queue belongs to a group, check whether the group's
1908*5113495bSYour Name 	 * credit limit is more stringent than the global credit limit.
1909*5113495bSYour Name 	 */
1910*5113495bSYour Name 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
1911*5113495bSYour Name 		if (txq->group_ptrs[i]) {
1912*5113495bSYour Name 			int group_credit;
1913*5113495bSYour Name 
1914*5113495bSYour Name 			group_credit = qdf_atomic_read(
1915*5113495bSYour Name 					&txq->group_ptrs[i]->credit);
1916*5113495bSYour Name 			updated_credit = QDF_MIN(updated_credit, group_credit);
1917*5113495bSYour Name 		}
1918*5113495bSYour Name 	}
1919*5113495bSYour Name 
1920*5113495bSYour Name 	credit = (updated_credit < 0) ? 0 : updated_credit;
1921*5113495bSYour Name 
1922*5113495bSYour Name 	return credit;
1923*5113495bSYour Name }
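/*
 * Example for the non-DBS limit above: with credit = 8 and a single
 * group whose credit currently reads 3, the scheduler may only use 3;
 * if a group's credit has gone negative, the result clamps to 0.
 */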
1924*5113495bSYour Name #endif /* FEATURE_HL_DBS_GROUP_CREDIT_SHARING */
1925*5113495bSYour Name 
1926*5113495bSYour Name void ol_tx_txq_group_credit_update(
1927*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev,
1928*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
1929*5113495bSYour Name 	int32_t credit,
1930*5113495bSYour Name 	u_int8_t absolute)
1931*5113495bSYour Name {
1932*5113495bSYour Name 	u_int8_t i;
1933*5113495bSYour Name 	/*
1934*5113495bSYour Name 	 * If this tx queue belongs to a group then
1935*5113495bSYour Name 	 * update group credit
1936*5113495bSYour Name 	 */
1937*5113495bSYour Name 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
1938*5113495bSYour Name 		if (txq->group_ptrs[i])
1939*5113495bSYour Name 			ol_txrx_update_group_credit(txq->group_ptrs[i],
1940*5113495bSYour Name 						    credit, absolute);
1941*5113495bSYour Name 	}
1942*5113495bSYour Name 	ol_tx_update_group_credit_stats(pdev);
1943*5113495bSYour Name }
1944*5113495bSYour Name 
1945*5113495bSYour Name void
1946*5113495bSYour Name ol_tx_set_vdev_group_ptr(
1947*5113495bSYour Name 	ol_txrx_pdev_handle pdev,
1948*5113495bSYour Name 	u_int8_t vdev_id,
1949*5113495bSYour Name 	struct ol_tx_queue_group_t *grp_ptr)
1950*5113495bSYour Name {
1951*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = NULL;
1952*5113495bSYour Name 	struct ol_txrx_peer_t *peer = NULL;
1953*5113495bSYour Name 
1954*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1955*5113495bSYour Name 		if (vdev->vdev_id == vdev_id) {
1956*5113495bSYour Name 			u_int8_t i, j;
1957*5113495bSYour Name 
1958*5113495bSYour Name 			/* update vdev queues group pointers */
1959*5113495bSYour Name 			for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
1960*5113495bSYour Name 				for (j = 0; j < OL_TX_MAX_GROUPS_PER_QUEUE; j++)
1961*5113495bSYour Name 					vdev->txqs[i].group_ptrs[j] = grp_ptr;
1962*5113495bSYour Name 			}
1963*5113495bSYour Name 			qdf_spin_lock_bh(&pdev->peer_ref_mutex);
1964*5113495bSYour Name 			/* Update peer queue group pointers */
1965*5113495bSYour Name 			TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1966*5113495bSYour Name 				for (i = 0; i < OL_TX_NUM_TIDS; i++) {
1967*5113495bSYour Name 					for (j = 0;
1968*5113495bSYour Name 						j < OL_TX_MAX_GROUPS_PER_QUEUE;
1969*5113495bSYour Name 							j++)
1970*5113495bSYour Name 						peer->txqs[i].group_ptrs[j] =
1971*5113495bSYour Name 							grp_ptr;
1972*5113495bSYour Name 				}
1973*5113495bSYour Name 			}
1974*5113495bSYour Name 			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
1975*5113495bSYour Name 			break;
1976*5113495bSYour Name 		}
1977*5113495bSYour Name 	}
1978*5113495bSYour Name }
1979*5113495bSYour Name 
1980*5113495bSYour Name void ol_tx_txq_set_group_ptr(
1981*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq,
1982*5113495bSYour Name 	struct ol_tx_queue_group_t *grp_ptr)
1983*5113495bSYour Name {
1984*5113495bSYour Name 	u_int8_t i;
1985*5113495bSYour Name 
1986*5113495bSYour Name 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
1987*5113495bSYour Name 		txq->group_ptrs[i] = grp_ptr;
1988*5113495bSYour Name }
1989*5113495bSYour Name 
1990*5113495bSYour Name void ol_tx_set_peer_group_ptr(
1991*5113495bSYour Name 	ol_txrx_pdev_handle pdev,
1992*5113495bSYour Name 	struct ol_txrx_peer_t *peer,
1993*5113495bSYour Name 	u_int8_t vdev_id,
1994*5113495bSYour Name 	u_int8_t tid)
1995*5113495bSYour Name {
1996*5113495bSYour Name 	u_int8_t i, j = 0;
1997*5113495bSYour Name 	struct ol_tx_queue_group_t *group = NULL;
1998*5113495bSYour Name 
1999*5113495bSYour Name 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
2000*5113495bSYour Name 		peer->txqs[tid].group_ptrs[i] = NULL;
2001*5113495bSYour Name 
2002*5113495bSYour Name 	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
2003*5113495bSYour Name 		group = &pdev->txq_grps[i];
2004*5113495bSYour Name 		if (ol_tx_vdev_has_tx_queue_group(group, vdev_id)) {
2005*5113495bSYour Name 			if (tid < OL_TX_NUM_QOS_TIDS) {
2006*5113495bSYour Name 				if (ol_tx_ac_has_tx_queue_group(
2007*5113495bSYour Name 						group,
2008*5113495bSYour Name 						TXRX_TID_TO_WMM_AC(tid))) {
2009*5113495bSYour Name 					peer->txqs[tid].group_ptrs[j] = group;
2010*5113495bSYour Name 					j++;
2011*5113495bSYour Name 				}
2012*5113495bSYour Name 			} else {
2013*5113495bSYour Name 				peer->txqs[tid].group_ptrs[j] = group;
2014*5113495bSYour Name 				j++;
2015*5113495bSYour Name 			}
2016*5113495bSYour Name 		}
2017*5113495bSYour Name 		if (j >= OL_TX_MAX_GROUPS_PER_QUEUE)
2018*5113495bSYour Name 			break;
2019*5113495bSYour Name 	}
2020*5113495bSYour Name }
2021*5113495bSYour Name 
2022*5113495bSYour Name u_int32_t ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev)
2023*5113495bSYour Name {
2024*5113495bSYour Name 	return OL_TX_MAX_TXQ_GROUPS;
2025*5113495bSYour Name }
2026*5113495bSYour Name #endif /* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL */
2027*5113495bSYour Name 
2028*5113495bSYour Name #if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
2029*5113495bSYour Name 	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
2030*5113495bSYour Name void ol_tx_update_grp_frm_count(struct ol_tx_frms_queue_t *txq, int num_frms)
2031*5113495bSYour Name {
2032*5113495bSYour Name 	int i;
2033*5113495bSYour Name 
2034*5113495bSYour Name 	if (!num_frms || !txq) {
2035*5113495bSYour Name 		ol_txrx_dbg("Invalid params");
2036*5113495bSYour Name 		return;
2037*5113495bSYour Name 	}
2038*5113495bSYour Name 
2039*5113495bSYour Name 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
2040*5113495bSYour Name 		if (txq->group_ptrs[i]) {
2041*5113495bSYour Name 			txq->group_ptrs[i]->frm_count += num_frms;
2042*5113495bSYour Name 			qdf_assert(txq->group_ptrs[i]->frm_count >= 0);
2043*5113495bSYour Name 		}
2044*5113495bSYour Name 	}
2045*5113495bSYour Name }
2046*5113495bSYour Name #endif
2047*5113495bSYour Name 
2048*5113495bSYour Name /*--- End of LL tx throttle queue code ---------------------------------------*/
2049