/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
#include <qdf_lock.h>           /* qdf_os_spinlock */
#include <qdf_time.h>           /* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>           /* qdf_nbuf_t */
#include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */

#include "queue.h"              /* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <enet.h>               /* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>          /* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>    /* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc. */
#include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>              /* ol_tx_reinject */
#include <ol_tx_send.h>

#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc. */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <pktlog_ac_fmt.h>
#include <cdp_txrx_handle.h>
#include <wlan_reg_services_api.h>
#include "qdf_hrtimer.h"

/* High/low tx resource count thresholds, in percent */
/* Set default high threshold to 15% */
#ifndef TX_RESOURCE_HIGH_TH_IN_PER
#define TX_RESOURCE_HIGH_TH_IN_PER 15
#endif

/* Set default low threshold to 5% */
#ifndef TX_RESOURCE_LOW_TH_IN_PER
#define TX_RESOURCE_LOW_TH_IN_PER 5
#endif

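/*
 * Per-PHY-mode tx descriptor allocation caps for HL netdev flow control;
 * TXRX_FC_* entries omitted from the designated initializer are
 * zero-initialized. (Presumably consumed when a vdev's tx_desc_limit is
 * configured; a limit of 0 disables flow control in ol_tx_hl_desc_alloc
 * below.)
 */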
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
static u16 ol_txrx_tx_desc_alloc_table[TXRX_FC_MAX] = {
	[TXRX_FC_5GH_80M_2x2] = 2000,
	[TXRX_FC_2GH_40M_2x2] = 800,
};
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */

u_int16_t
ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
{
	uint16_t desc_pool_size;
	uint16_t steady_state_tx_lifetime_ms;
	uint16_t safety_factor;

	/*
	 * Steady-state tx latency:
	 *     roughly 1-2 ms flight time
	 *   + roughly 1-2 ms prep time,
	 *   + roughly 1-2 ms target->host notification time.
	 * = roughly 6 ms total
	 * Thus, steady state number of frames =
	 * steady state max throughput / frame size * tx latency, e.g.
	 * 1 Gbps / 1500 bytes * 6 ms = 500
	 */
	steady_state_tx_lifetime_ms = 6;

	safety_factor = 8;

	desc_pool_size =
		ol_cfg_max_thruput_mbps(ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
		(8 * OL_TX_AVG_FRM_BYTES) *
		steady_state_tx_lifetime_ms *
		safety_factor;
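
	/*
	 * Worked example (assuming OL_TX_AVG_FRM_BYTES matches the
	 * 1500-byte frame size used in the comment above):
	 * 1000 Mbps * 1000 / (8 * 1500) * 6 ms * 8 = 83 * 6 * 8 ~= 4000
	 * descriptors, which is then clamped to the MIN/MAX bounds below.
	 */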

	/* minimum */
	if (desc_pool_size < OL_TX_DESC_POOL_SIZE_MIN_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MIN_HL;

	/* maximum */
	if (desc_pool_size > OL_TX_DESC_POOL_SIZE_MAX_HL)
		desc_pool_size = OL_TX_DESC_POOL_SIZE_MAX_HL;

	return desc_pool_size;
}

#ifdef CONFIG_TX_DESC_HI_PRIO_RESERVE

/**
 * ol_tx_hl_desc_alloc() - Allocate and initialize a tx descriptor
 *                         for an HL system.
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @msdu: the tx frame
 * @msdu_info: the tx meta data
 *
 * Return: the tx descriptor
 */
static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc = NULL;

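	/*
	 * Allocation policy: while more than
	 * TXRX_HL_TX_DESC_HI_PRIO_RESERVED descriptors remain, any frame
	 * may allocate; once the pool falls to that watermark, only
	 * high-priority control frames (the IPv4 DHCP/EAPOL cases below)
	 * may dip into the reserve.
	 */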
	if (qdf_atomic_read(&pdev->tx_queue.rsrc_cnt) >
	    TXRX_HL_TX_DESC_HI_PRIO_RESERVED) {
		tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
	} else if (qdf_nbuf_is_ipv4_pkt(msdu)) {
		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_EAPOL)) {
			tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
			ol_txrx_info("Got tx desc from resv pool");
		}
	}
	return tx_desc;
}

#elif defined(QCA_HL_NETDEV_FLOW_CONTROL)

/**
 * ol_tx_desc_is_high_prio() - check whether a tx frame should be treated
 *                             as high priority for flow control
 * @msdu: the tx frame
 *
 * Return: true if the frame is a high-priority control frame
 */
bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu)
{
	enum qdf_proto_subtype proto_subtype;
	bool high_prio = false;

	if (qdf_nbuf_is_ipv4_pkt(msdu)) {
		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		     QDF_NBUF_CB_PACKET_TYPE_EAPOL))
			high_prio = true;
	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		   QDF_NBUF_CB_PACKET_TYPE_ARP) {
		high_prio = true;
	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
		   QDF_NBUF_CB_PACKET_TYPE_ICMPv6) {
		proto_subtype = qdf_nbuf_get_icmpv6_subtype(msdu);
		switch (proto_subtype) {
		case QDF_PROTO_ICMPV6_NA:
		case QDF_PROTO_ICMPV6_NS:
			high_prio = true;
			break;
		default:
			high_prio = false;
		}
	}
	return high_prio;
}

static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc =
			ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);

	if (!tx_desc)
		return NULL;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	/* return if TX flow control is disabled */
	if (vdev->tx_desc_limit == 0) {
		qdf_spin_unlock_bh(&pdev->tx_mutex);
		return tx_desc;
	}

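	/*
	 * Flow-control watermarks: reaching queue_stop_th pauses the
	 * normal-priority netdev queues; only when the per-vdev
	 * tx_desc_limit is fully consumed is the high-priority queue
	 * paused as well, so high-priority frames keep flowing the longest.
	 */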
	if (!qdf_atomic_read(&vdev->os_q_paused) &&
	    (qdf_atomic_read(&vdev->tx_desc_count) >= vdev->queue_stop_th)) {
		/*
		 * Pause the normal-priority netdev queues if the tx desc
		 * limit has been crossed.
		 */
		pdev->pause_cb(vdev->vdev_id,
			       WLAN_STOP_NON_PRIORITY_QUEUE,
			       WLAN_DATA_FLOW_CONTROL);
		qdf_atomic_set(&vdev->os_q_paused, 1);
	} else if (ol_tx_desc_is_high_prio(msdu) && !vdev->prio_q_paused &&
		   (qdf_atomic_read(&vdev->tx_desc_count)
		   == vdev->tx_desc_limit)) {
		/* Pause the high-priority queue */
		pdev->pause_cb(vdev->vdev_id,
			       WLAN_NETIF_PRIORITY_QUEUE_OFF,
			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
		vdev->prio_q_paused = 1;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	return tx_desc;
}

#else

static inline
struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
					 struct ol_txrx_vdev_t *vdev,
					 qdf_nbuf_t msdu,
					 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
}
#endif

static inline uint16_t
ol_txrx_rsrc_threshold_lo(int desc_pool_size)
{
	int threshold_low;

	/*
	 * Always maintain a margin of unallocated descriptors
	 * (default 5% of the pool).
	 */
	threshold_low = ((TX_RESOURCE_LOW_TH_IN_PER) *
			 desc_pool_size) / 100;

	return threshold_low;
}

static inline uint16_t
ol_txrx_rsrc_threshold_hi(int desc_pool_size)
{
	int threshold_high;
	/*
	 * When freeing up descriptors, keep going until there's a
	 * comfortable margin (default 15% of the pool).
	 */
	threshold_high = ((TX_RESOURCE_HIGH_TH_IN_PER) *
			  desc_pool_size) / 100;

	return threshold_high;
}
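
/*
 * Example: with the ~4000-descriptor pool from the worked example above,
 * the low watermark defaults to 5% = 200 descriptors and the high
 * watermark to 15% = 600.
 */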

void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
{
	uint16_t desc_pool_size, i;

	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);

	qdf_atomic_init(&pdev->tx_queue.rsrc_cnt);
	qdf_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);

	pdev->tx_queue.rsrc_threshold_lo =
		ol_txrx_rsrc_threshold_lo(desc_pool_size);
	pdev->tx_queue.rsrc_threshold_hi =
		ol_txrx_rsrc_threshold_hi(desc_pool_size);

	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++)
		qdf_atomic_init(&pdev->txq_grps[i].credit);

	ol_tx_target_credit_init(pdev, desc_pool_size);
}

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
				      ol_txrx_vdev_handle vdev,
				      struct ol_tx_desc_t *tx_desc,
				      qdf_nbuf_t msdu,
				      struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	if (OL_TX_ENCAP(vdev, tx_desc, msdu, tx_msdu_info) != A_OK) {
		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
		if (tx_msdu_info->peer) {
			/* remove the peer reference added above */
			ol_txrx_peer_release_ref(tx_msdu_info->peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
		}
		return -EINVAL;
	}

	return 0;
}
#else
static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
				      ol_txrx_vdev_handle vdev,
				      struct ol_tx_desc_t *tx_desc,
				      qdf_nbuf_t msdu,
				      struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	/* no-op */
	return 0;
}
#endif

/**
 * parse_ocb_tx_header() - check for an OCB TX control header on a packet
 *                         and extract it if present
 * @msdu:    Pointer to OS packet (qdf_nbuf_t)
 * @tx_ctrl: Output buffer that receives a copy of the TX control header,
 *           if one is present
 *
 * Return: true if OCB parsing is successful
 */
#ifdef WLAN_FEATURE_DSRC
#define OCB_HEADER_VERSION     1
static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	qdf_ether_header_t *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if the TX control header is present */
	eth_hdr_p = (qdf_ether_header_t *)qdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != QDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do. */
		return true;

	/* Remove the ethernet header */
	qdf_nbuf_pull_head(msdu, sizeof(qdf_ether_header_t));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)qdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			qdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		return false;
	}

	/* Remove the TX control header */
	qdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}
#else
static bool parse_ocb_tx_header(qdf_nbuf_t msdu,
				struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	return true;
}
#endif

/**
 * ol_txrx_mgmt_tx_desc_alloc() - Allocate and initialize a tx descriptor
 *				  for a management frame
 * @pdev: the data physical device sending the data
 * @vdev: the virtual device sending the data
 * @tx_mgmt_frm: the tx management frame
 * @tx_msdu_info: the tx meta data
 *
 * Return: the tx descriptor
 */
struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	tx_msdu_info->htt.action.tx_comp_req = 1;
	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
	return tx_desc;
}

/**
 * ol_txrx_mgmt_send_frame() - send a management frame
 * @vdev: virtual device sending the frame
 * @tx_desc: tx desc
 * @tx_mgmt_frm: management frame to send
 * @tx_msdu_info: the tx meta data
 * @chanfreq: channel frequency to transmit the frame on
 *
 * Return:
 *      0 -> the frame is accepted for transmission, -OR-
 *      1 -> the frame was not accepted
 */
int ol_txrx_mgmt_send_frame(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info,
	uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_frms_queue_t *txq;
	int status = 1;

	/*
	 * 1.  Look up the peer and queue the frame in the peer's mgmt queue.
	 * 2.  Invoke the download scheduler.
	 */
	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
	if (!txq) {
		/* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
		 *			     msdu);
		 */
		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
					     1 /* error */);
		goto out; /* can't accept the tx mgmt frame */
	}
	/*
	 * Initialize the HTT tx desc l2 header offset field.
	 * Even though tx encap does not apply to mgmt frames,
	 * htt_tx_desc_mpdu_header still needs to be called,
	 * to specify that there was no L2 header added by tx encap,
	 * so the frame's length does not need to be adjusted to account for
	 * an added L2 header.
	 */
	htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);
	if (qdf_unlikely(htt_tx_desc_init(
			pdev->htt_pdev, tx_desc->htt_tx_desc,
			tx_desc->htt_tx_desc_paddr,
			ol_tx_desc_id(pdev, tx_desc),
			tx_mgmt_frm,
			&tx_msdu_info->htt, &tx_msdu_info->tso_info, NULL, 0)))
		goto out;
	htt_tx_desc_display(tx_desc->htt_tx_desc);
	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);

	ol_tx_enqueue(vdev->pdev, txq, tx_desc, tx_msdu_info);
	ol_tx_sched(vdev->pdev);
	status = 0;
out:
	if (tx_msdu_info->peer) {
		/* remove the peer reference added above */
		ol_txrx_peer_release_ref(tx_msdu_info->peer,
					 PEER_DEBUG_ID_OL_INTERNAL);
	}

	return status;
}

/**
 * ol_tx_hl_base() - send tx frames for an HL system.
 * @vdev: the virtual device sending the data
 * @tx_spec: indicate what non-standard transmission actions to apply
 * @msdu_list: the tx frames to send
 * @tx_comp_req: tx completion req
 * @call_sched: invoke the tx scheduler after enqueuing if true
 *
 * Return: NULL if all MSDUs are accepted, otherwise the list of
 *         unaccepted MSDUs
 */
static inline qdf_nbuf_t
ol_tx_hl_base(
	ol_txrx_vdev_handle vdev,
	enum ol_tx_spec tx_spec,
	qdf_nbuf_t msdu_list,
	int tx_comp_req,
	bool call_sched)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	qdf_nbuf_t msdu = msdu_list;
	struct ol_txrx_msdu_info_t tx_msdu_info;
	struct ocb_tx_ctrl_hdr_t tx_ctrl;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	tx_msdu_info.tso_info.is_tso = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
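	/*
	 * Per-MSDU pipeline, as implemented below: allocate a tx descriptor,
	 * classify the frame to a (peer, TID) tx queue, initialize the HTT
	 * tx descriptor, enqueue, and finally (if call_sched) kick the
	 * download scheduler once the whole list has been processed.
	 */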
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_frms_queue_t *txq;
		struct ol_tx_desc_t *tx_desc = NULL;

		qdf_mem_zero(&tx_ctrl, sizeof(tx_ctrl));
		tx_msdu_info.peer = NULL;
		/*
		 * The netbuf will get stored into a (peer-TID) tx queue list
		 * inside the ol_tx_classify function, or else dropped,
		 * so store the next pointer immediately.
		 */
		next = qdf_nbuf_next(msdu);

		tx_desc = ol_tx_hl_desc_alloc(pdev, vdev, msdu, &tx_msdu_info);

		if (!tx_desc) {
			/*
			 * If we're out of tx descs, there's no need to try
			 * to allocate tx descs for the remaining MSDUs.
			 */
			TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject,
						  msdu);
			return msdu; /* the list of unaccepted MSDUs */
		}

		/* OL_TXRX_PROT_AN_LOG(pdev->prot_an_tx_sent, msdu); */

		qdf_dp_trace_log_pkt(vdev->vdev_id, msdu, QDF_TX,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     vdev->qdf_opmode);
		DPTRACE(qdf_dp_trace_data_pkt(msdu, QDF_TRACE_DEFAULT_PDEV_ID,
					      QDF_DP_TRACE_TX_PACKET_RECORD,
					      tx_desc->id, QDF_TX));

		if (tx_spec != OL_TX_SPEC_STD) {
#if defined(FEATURE_WLAN_TDLS)
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
#else
			if (tx_spec & OL_TX_SPEC_TSO) {
#endif
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			}
			if (ol_txrx_tx_is_raw(tx_spec)) {
				/* CHECK THIS: does this need
				 * to happen after htt_tx_desc_init?
				 */
				/* different types of raw frames */
				u_int8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);

				htt_tx_desc_type(htt_pdev,
						 tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}

		tx_msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
		tx_msdu_info.htt.info.frame_type = htt_frm_type_data;
		tx_msdu_info.htt.info.l2_hdr_type = pdev->htt_pkt_type;

		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(msdu) == 1) {
			tx_msdu_info.htt.action.tx_comp_req = 1;
			tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
		} else {
			tx_msdu_info.htt.action.tx_comp_req = tx_comp_req;
		}

		/*
		 * If the vdev is in OCB mode, parse the tx control header.
		 */
		if (vdev->opmode == wlan_op_mode_ocb) {
			if (!parse_ocb_tx_header(msdu, &tx_ctrl)) {
				/*
				 * There was an error parsing the header.
				 * Skip this packet.
				 */
				goto MSDU_LOOP_BOTTOM;
			}
		}

		txq = ol_tx_classify(vdev, tx_desc, msdu, &tx_msdu_info);

		/* initialize the HW tx descriptor */
		htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
				 tx_desc->htt_tx_desc_paddr,
				 ol_tx_desc_id(pdev, tx_desc),
				 msdu,
				 &tx_msdu_info.htt,
				 &tx_msdu_info.tso_info,
				 &tx_ctrl,
				 vdev->opmode == wlan_op_mode_ocb);

		if (!txq || TX_FILTER_CHECK(&tx_msdu_info)) {
			/* drop this frame,
			 * but try sending subsequent frames
			 */
			/* TXRX_STATS_MSDU_LIST_INCR(pdev,
			 * tx.dropped.no_txq, msdu);
			 */
			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
			if (tx_msdu_info.peer) {
				/* remove the peer reference added above */
				ol_txrx_peer_release_ref(
					tx_msdu_info.peer,
					PEER_DEBUG_ID_OL_INTERNAL);
			}
			goto MSDU_LOOP_BOTTOM;
		}

		if (tx_msdu_info.peer) {
			/*
			 * If the peer state is not associated, then drop
			 * all the data packets received for that peer.
			 */
			if (tx_msdu_info.peer->state ==
			    OL_TXRX_PEER_STATE_DISC) {
				qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
				ol_txrx_peer_release_ref(
					tx_msdu_info.peer,
					PEER_DEBUG_ID_OL_INTERNAL);
				msdu = next;
				continue;
			} else if (tx_msdu_info.peer->state !=
				   OL_TXRX_PEER_STATE_AUTH) {
				if (tx_msdu_info.htt.info.ethertype !=
				    ETHERTYPE_PAE &&
				    tx_msdu_info.htt.info.ethertype !=
				    ETHERTYPE_WAI) {
					qdf_atomic_inc(
						&pdev->tx_queue.rsrc_cnt);
					ol_tx_desc_frame_free_nonstd(
						pdev, tx_desc, 1);
					ol_txrx_peer_release_ref(
						tx_msdu_info.peer,
						PEER_DEBUG_ID_OL_INTERNAL);
					msdu = next;
					continue;
				}
			}
		}
		/*
		 * Initialize the HTT tx desc l2 header offset field.
		 * htt_tx_desc_mpdu_header needs to be called to make sure
		 * the l2 header size is initialized correctly, to handle
		 * cases where TX ENCAP is disabled or Tx Encap fails to
		 * perform Encap.
		 */
		htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc, 0);

		/*
		 * Note: when the driver is built without support for
		 * SW tx encap, the following call is a no-op.
		 * When the driver is built with support for SW tx
		 * encap, it performs encap, and if an error is
		 * encountered, we jump to the MSDU_LOOP_BOTTOM label.
		 */
		if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu,
					&tx_msdu_info))
			goto MSDU_LOOP_BOTTOM;

		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);

		ol_tx_enqueue(pdev, txq, tx_desc, &tx_msdu_info);
		if (tx_msdu_info.peer) {
			OL_TX_PEER_STATS_UPDATE(tx_msdu_info.peer, msdu);
			/* remove the peer reference added above */
			ol_txrx_peer_release_ref(tx_msdu_info.peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
		}
MSDU_LOOP_BOTTOM:
		msdu = next;
	}

	if (call_sched)
		ol_tx_sched(pdev);
	return NULL; /* all MSDUs were accepted */
}

#ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK

/**
 * ol_tx_pdev_reset_driver_del_ack() - reset the driver delayed-ack enabled
 *				       flag for all vdevs
 * @soc_hdl: soc handle
 * @pdev_id: datapath pdev identifier
 *
 * Return: none
 */
void
ol_tx_pdev_reset_driver_del_ack(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	struct ol_txrx_vdev_t *vdev;

	if (!pdev)
		return;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		vdev->driver_del_ack_enabled = false;

		dp_debug("vdev_id %d driver_del_ack_enabled %d",
			 vdev->vdev_id, vdev->driver_del_ack_enabled);
	}
}

/**
 * ol_tx_vdev_set_driver_del_ack_enable() - set the driver delayed-ack
 *					    enabled flag
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @rx_packets: number of rx packets
 * @time_in_ms: time in ms
 * @high_th: high threshold
 * @low_th: low threshold
 *
 * Return: none
 */
void
ol_tx_vdev_set_driver_del_ack_enable(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id,
				     unsigned long rx_packets,
				     uint32_t time_in_ms,
				     uint32_t high_th,
				     uint32_t low_th)
{
	struct ol_txrx_vdev_t *vdev =
			(struct ol_txrx_vdev_t *)
			ol_txrx_get_vdev_from_vdev_id(vdev_id);
	bool old_driver_del_ack_enabled;

	if (!vdev || low_th > high_th)
		return;

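	/*
	 * Hysteresis: enable delayed-ack handling only once the rx packet
	 * rate climbs above high_th, and disable it only after it falls
	 * below low_th; rates in between leave the current setting alone.
	 */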
	old_driver_del_ack_enabled = vdev->driver_del_ack_enabled;
	if (rx_packets > high_th)
		vdev->driver_del_ack_enabled = true;
	else if (rx_packets < low_th)
		vdev->driver_del_ack_enabled = false;

	if (old_driver_del_ack_enabled != vdev->driver_del_ack_enabled) {
		dp_debug("vdev_id %d driver_del_ack_enabled %d rx_packets %ld time_in_ms %d high_th %d low_th %d",
			 vdev->vdev_id, vdev->driver_del_ack_enabled,
			 rx_packets, time_in_ms, high_th, low_th);
	}
}

/**
 * ol_tx_hl_send_all_tcp_ack() - send all queued tcp ack packets
 * @vdev: vdev handle
 *
 * Return: none
 */
void ol_tx_hl_send_all_tcp_ack(struct ol_txrx_vdev_t *vdev)
{
	int i;
	struct tcp_stream_node *tcp_node_list;
	struct tcp_stream_node *temp;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
		tcp_node_list = NULL;
		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
		if (vdev->tcp_ack_hash.node[i].no_of_entries)
			tcp_node_list = vdev->tcp_ack_hash.node[i].head;

		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
		vdev->tcp_ack_hash.node[i].head = NULL;
		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);

		/* Send all packets */
		while (tcp_node_list) {
			int tx_comp_req = pdev->cfg.default_tx_comp_req ||
						pdev->cfg.request_tx_comp;
			qdf_nbuf_t msdu_list;

			temp = tcp_node_list;
			tcp_node_list = temp->next;

			msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
						  temp->head,
						  tx_comp_req, false);
			if (msdu_list)
				qdf_nbuf_tx_free(msdu_list, 1/*error*/);
			ol_txrx_vdev_free_tcp_node(vdev, temp);
		}
	}
	ol_tx_sched(vdev->pdev);
}

/**
 * tcp_del_ack_tasklet() - tasklet function to send ack packets
 * @data: vdev handle
 *
 * Return: none
 */
void tcp_del_ack_tasklet(void *data)
{
	struct ol_txrx_vdev_t *vdev = data;

	ol_tx_hl_send_all_tcp_ack(vdev);
}

/**
 * ol_tx_get_stream_id() - get stream_id from packet info
 * @info: packet info
 *
 * Return: stream_id
 */
uint16_t ol_tx_get_stream_id(struct packet_info *info)
{
	return ((info->dst_port + info->dst_ip + info->src_port + info->src_ip)
					 & (OL_TX_HL_DEL_ACK_HASH_SIZE - 1));
}
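
/*
 * The hash above folds the TCP 4-tuple by simple addition and masks with
 * (OL_TX_HL_DEL_ACK_HASH_SIZE - 1); this yields a uniform bucket index
 * only if OL_TX_HL_DEL_ACK_HASH_SIZE is a power of two, which the mask
 * assumes.
 */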

/**
 * ol_tx_is_tcp_ack() - check whether the packet is a pure tcp ack frame
 * @msdu: packet
 *
 * Return: true if the packet is a tcp ack frame
 */
static bool
ol_tx_is_tcp_ack(qdf_nbuf_t msdu)
{
	uint16_t ether_type;
	uint8_t  protocol;
	uint8_t  flag, ip_header_len, tcp_header_len;
	uint32_t seg_len;
	uint8_t  *skb_data;
	uint32_t skb_len;
	bool tcp_acked = false;
	uint32_t tcp_header_off;

	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
		goto exit;

	ether_type = (uint16_t)(*(uint16_t *)
			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
	protocol = (uint8_t)(*(uint8_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));

	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
		ip_header_len = ((uint8_t)(*(uint8_t *)
				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;

		tcp_header_len = ((uint8_t)(*(uint8_t *)
			(skb_data + tcp_header_off +
			QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
		seg_len = skb_len - tcp_header_len - tcp_header_off;
		flag = (uint8_t)(*(uint8_t *)
			(skb_data + tcp_header_off +
			QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));

		/* a pure ack: only the ACK flag set and no tcp payload */
		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0))
			tcp_acked = true;
	}

exit:

	return tcp_acked;
}
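
/*
 * Note: ol_tx_get_packet_info() below repeats this IPv4/TCP parse and, for
 * pure acks, additionally records the 4-tuple, hash bucket, and ack number
 * used by the delayed-ack replacement logic.
 */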

/**
 * ol_tx_get_packet_info() - update packet info for the passed msdu
 * @msdu: packet
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_get_packet_info(qdf_nbuf_t msdu, struct packet_info *info)
{
	uint16_t ether_type;
	uint8_t  protocol;
	uint8_t  flag, ip_header_len, tcp_header_len;
	uint32_t seg_len;
	uint8_t  *skb_data;
	uint32_t skb_len;
	uint32_t tcp_header_off;

	info->type = NO_TCP_PKT;

	qdf_nbuf_peek_header(msdu, &skb_data, &skb_len);
	if (skb_len < (QDF_NBUF_TRAC_IPV4_OFFSET +
	    QDF_NBUF_TRAC_IPV4_HEADER_SIZE +
	    QDF_NBUF_TRAC_TCP_FLAGS_OFFSET))
		return;

	ether_type = (uint16_t)(*(uint16_t *)
			(skb_data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
	protocol = (uint8_t)(*(uint8_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));

	if ((QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE) == ether_type) &&
	    (protocol == QDF_NBUF_TRAC_TCP_TYPE)) {
		ip_header_len = ((uint8_t)(*(uint8_t *)
				(skb_data + QDF_NBUF_TRAC_IPV4_OFFSET)) &
				QDF_NBUF_TRAC_IPV4_HEADER_MASK) << 2;
		tcp_header_off = QDF_NBUF_TRAC_IPV4_OFFSET + ip_header_len;

		tcp_header_len = ((uint8_t)(*(uint8_t *)
			(skb_data + tcp_header_off +
			QDF_NBUF_TRAC_TCP_HEADER_LEN_OFFSET))) >> 2;
		seg_len = skb_len - tcp_header_len - tcp_header_off;
		flag = (uint8_t)(*(uint8_t *)
			(skb_data + tcp_header_off +
			QDF_NBUF_TRAC_TCP_FLAGS_OFFSET));

		info->src_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_SRC_ADDR_OFFSET)));
		info->dst_ip = QDF_SWAP_U32((uint32_t)(*(uint32_t *)
			(skb_data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET)));
		info->src_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_SPORT_OFFSET)));
		info->dst_port = QDF_SWAP_U16((uint16_t)(*(uint16_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_DPORT_OFFSET)));
		info->stream_id = ol_tx_get_stream_id(info);

		if ((flag == QDF_NBUF_TRAC_TCP_ACK_MASK) && (seg_len == 0)) {
			info->type = TCP_PKT_ACK;
			info->ack_number = (uint32_t)(*(uint32_t *)
				(skb_data + tcp_header_off +
				QDF_NBUF_TRAC_TCP_ACK_OFFSET));
			info->ack_number = QDF_SWAP_U32(info->ack_number);
		} else {
			info->type = TCP_PKT_NO_ACK;
		}
	}
}

/**
 * ol_tx_hl_find_and_send_tcp_stream() - find and send tcp stream for passed
 *                                       stream info
 * @vdev: vdev handle
 * @info: packet info
 *
 * Return: none
 */
void ol_tx_hl_find_and_send_tcp_stream(struct ol_txrx_vdev_t *vdev,
				       struct packet_info *info)
{
	uint8_t no_of_entries;
	struct tcp_stream_node *node_to_be_remove = NULL;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	/* remove the tcp node from the hash */
	qdf_spin_lock_bh(
		&vdev->tcp_ack_hash.node[info->stream_id].hash_node_lock);

	no_of_entries =
		vdev->tcp_ack_hash.node[info->stream_id].no_of_entries;
	if (no_of_entries > 1) {
		/* collision case */
		struct tcp_stream_node *head =
			vdev->tcp_ack_hash.node[info->stream_id].head;
		struct tcp_stream_node *temp;

		if ((head->dst_ip == info->dst_ip) &&
		    (head->src_ip == info->src_ip) &&
		    (head->src_port == info->src_port) &&
		    (head->dst_port == info->dst_port)) {
			node_to_be_remove = head;
			vdev->tcp_ack_hash.node[info->stream_id].head =
				head->next;
			vdev->tcp_ack_hash.node[info->stream_id].
				no_of_entries--;
		} else {
			temp = head;
			while (temp->next) {
				if ((temp->next->dst_ip == info->dst_ip) &&
				    (temp->next->src_ip == info->src_ip) &&
				    (temp->next->src_port == info->src_port) &&
				    (temp->next->dst_port == info->dst_port)) {
					node_to_be_remove = temp->next;
					temp->next = temp->next->next;
					vdev->tcp_ack_hash.
						node[info->stream_id].
						no_of_entries--;
					break;
				}
				temp = temp->next;
			}
		}
	} else if (no_of_entries == 1) {
		/* only one tcp_node */
		node_to_be_remove =
			vdev->tcp_ack_hash.node[info->stream_id].head;
		vdev->tcp_ack_hash.node[info->stream_id].head = NULL;
		vdev->tcp_ack_hash.node[info->stream_id].no_of_entries = 0;
	}
	qdf_spin_unlock_bh(
		&vdev->tcp_ack_hash.node[info->stream_id].hash_node_lock);

	/* send the packets */
	if (node_to_be_remove) {
		int tx_comp_req = pdev->cfg.default_tx_comp_req ||
					pdev->cfg.request_tx_comp;
		qdf_nbuf_t msdu_list;

		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
					  node_to_be_remove->head,
					  tx_comp_req, true);
		if (msdu_list)
			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
	}
}

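/**
 * ol_tx_hl_rep_tcp_ack() - replace or flush a queued tcp ack for the stream
 *			    matching @info
 * @vdev: vdev handle
 * @msdu: new tcp ack packet
 * @info: packet info for @msdu
 * @is_found: set to true if a queued stream node matches the 4-tuple
 * @start_timer: set to true if the delayed-ack timer should be (re)armed
 *
 * Return: the stream node whose queued packets should now be sent, or NULL
 */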
1029*5113495bSYour Name static struct tcp_stream_node *
1030*5113495bSYour Name ol_tx_hl_rep_tcp_ack(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu,
1031*5113495bSYour Name 		     struct packet_info *info, bool *is_found,
1032*5113495bSYour Name 		     bool *start_timer)
1033*5113495bSYour Name {
1034*5113495bSYour Name 	struct tcp_stream_node *node_to_be_remove = NULL;
1035*5113495bSYour Name 	struct tcp_stream_node *head =
1036*5113495bSYour Name 		 vdev->tcp_ack_hash.node[info->stream_id].head;
1037*5113495bSYour Name 	struct tcp_stream_node *temp;
1038*5113495bSYour Name 
1039*5113495bSYour Name 	if ((head->dst_ip == info->dst_ip) &&
1040*5113495bSYour Name 	    (head->src_ip == info->src_ip) &&
1041*5113495bSYour Name 	    (head->src_port == info->src_port) &&
1042*5113495bSYour Name 	    (head->dst_port == info->dst_port)) {
1043*5113495bSYour Name 		*is_found = true;
1044*5113495bSYour Name 		if ((head->ack_number < info->ack_number) &&
1045*5113495bSYour Name 		    (head->no_of_ack_replaced <
1046*5113495bSYour Name 		    ol_cfg_get_del_ack_count_value(vdev->pdev->ctrl_pdev))) {
1047*5113495bSYour Name 			/* replace ack packet */
1048*5113495bSYour Name 			qdf_nbuf_tx_free(head->head, 1);
1049*5113495bSYour Name 			head->head = msdu;
1050*5113495bSYour Name 			head->ack_number = info->ack_number;
1051*5113495bSYour Name 			head->no_of_ack_replaced++;
1052*5113495bSYour Name 			*start_timer = true;
1053*5113495bSYour Name 
1054*5113495bSYour Name 			vdev->no_of_tcpack_replaced++;
1055*5113495bSYour Name 
1056*5113495bSYour Name 			if (head->no_of_ack_replaced ==
1057*5113495bSYour Name 			    ol_cfg_get_del_ack_count_value(
1058*5113495bSYour Name 			    vdev->pdev->ctrl_pdev)) {
1059*5113495bSYour Name 				node_to_be_remove = head;
1060*5113495bSYour Name 				vdev->tcp_ack_hash.node[info->stream_id].head =
1061*5113495bSYour Name 					head->next;
1062*5113495bSYour Name 				vdev->tcp_ack_hash.node[info->stream_id].
1063*5113495bSYour Name 					no_of_entries--;
1064*5113495bSYour Name 			}
1065*5113495bSYour Name 		} else {
1066*5113495bSYour Name 			/* append and send packets */
1067*5113495bSYour Name 			head->head->next = msdu;
1068*5113495bSYour Name 			node_to_be_remove = head;
1069*5113495bSYour Name 			vdev->tcp_ack_hash.node[info->stream_id].head =
1070*5113495bSYour Name 				head->next;
1071*5113495bSYour Name 			vdev->tcp_ack_hash.node[info->stream_id].
1072*5113495bSYour Name 				no_of_entries--;
1073*5113495bSYour Name 		}
1074*5113495bSYour Name 	} else {
1075*5113495bSYour Name 		temp = head;
1076*5113495bSYour Name 		while (temp->next) {
1077*5113495bSYour Name 			if ((temp->next->dst_ip == info->dst_ip) &&
1078*5113495bSYour Name 			    (temp->next->src_ip == info->src_ip) &&
1079*5113495bSYour Name 			    (temp->next->src_port == info->src_port) &&
1080*5113495bSYour Name 			    (temp->next->dst_port == info->dst_port)) {
1081*5113495bSYour Name 				*is_found = true;
1082*5113495bSYour Name 				if ((temp->next->ack_number <
1083*5113495bSYour Name 					info->ack_number) &&
1084*5113495bSYour Name 				    (temp->next->no_of_ack_replaced <
1085*5113495bSYour Name 					 ol_cfg_get_del_ack_count_value(
1086*5113495bSYour Name 					 vdev->pdev->ctrl_pdev))) {
1087*5113495bSYour Name 					/* replace ack packet */
1088*5113495bSYour Name 					qdf_nbuf_tx_free(temp->next->head, 1);
					temp->next->head = msdu;
1090*5113495bSYour Name 					temp->next->ack_number =
1091*5113495bSYour Name 						info->ack_number;
1092*5113495bSYour Name 					temp->next->no_of_ack_replaced++;
1093*5113495bSYour Name 					*start_timer = true;
1094*5113495bSYour Name 
1095*5113495bSYour Name 					vdev->no_of_tcpack_replaced++;
1096*5113495bSYour Name 
1097*5113495bSYour Name 					if (temp->next->no_of_ack_replaced ==
1098*5113495bSYour Name 					   ol_cfg_get_del_ack_count_value(
1099*5113495bSYour Name 					   vdev->pdev->ctrl_pdev)) {
1100*5113495bSYour Name 						node_to_be_remove = temp->next;
1101*5113495bSYour Name 						temp->next = temp->next->next;
1102*5113495bSYour Name 						vdev->tcp_ack_hash.
1103*5113495bSYour Name 							node[info->stream_id].
1104*5113495bSYour Name 							no_of_entries--;
1105*5113495bSYour Name 					}
1106*5113495bSYour Name 				} else {
1107*5113495bSYour Name 					/* append and send packets */
1108*5113495bSYour Name 					temp->next->head->next = msdu;
1109*5113495bSYour Name 					node_to_be_remove = temp->next;
1110*5113495bSYour Name 					temp->next = temp->next->next;
1111*5113495bSYour Name 					vdev->tcp_ack_hash.
1112*5113495bSYour Name 						node[info->stream_id].
1113*5113495bSYour Name 						no_of_entries--;
1114*5113495bSYour Name 				}
1115*5113495bSYour Name 				break;
1116*5113495bSYour Name 			}
1117*5113495bSYour Name 			temp = temp->next;
1118*5113495bSYour Name 		}
1119*5113495bSYour Name 	}
1120*5113495bSYour Name 	return node_to_be_remove;
1121*5113495bSYour Name }
1122*5113495bSYour Name 
1123*5113495bSYour Name /**
1124*5113495bSYour Name  * ol_tx_hl_find_and_replace_tcp_ack() - find and replace tcp ack packet for
1125*5113495bSYour Name  *                                       passed packet info
1126*5113495bSYour Name  * @vdev: vdev handle
1127*5113495bSYour Name  * @msdu: packet
1128*5113495bSYour Name  * @info: packet info
1129*5113495bSYour Name  *
1130*5113495bSYour Name  * Return: none
1131*5113495bSYour Name  */
1132*5113495bSYour Name void ol_tx_hl_find_and_replace_tcp_ack(struct ol_txrx_vdev_t *vdev,
1133*5113495bSYour Name 				       qdf_nbuf_t msdu,
1134*5113495bSYour Name 				       struct packet_info *info)
1135*5113495bSYour Name {
1136*5113495bSYour Name 	uint8_t no_of_entries;
1137*5113495bSYour Name 	struct tcp_stream_node *node_to_be_remove = NULL;
1138*5113495bSYour Name 	bool is_found = false, start_timer = false;
1139*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1140*5113495bSYour Name 
1141*5113495bSYour Name 	/* replace ack if required or send packets */
1142*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
1143*5113495bSYour Name 			hash_node_lock);
1144*5113495bSYour Name 
1145*5113495bSYour Name 	no_of_entries = vdev->tcp_ack_hash.node[info->stream_id].no_of_entries;
1146*5113495bSYour Name 	if (no_of_entries > 0) {
1147*5113495bSYour Name 		node_to_be_remove = ol_tx_hl_rep_tcp_ack(vdev, msdu, info,
1148*5113495bSYour Name 							 &is_found,
1149*5113495bSYour Name 							 &start_timer);
1150*5113495bSYour Name 	}
1151*5113495bSYour Name 
1152*5113495bSYour Name 	if (no_of_entries == 0 || !is_found) {
1153*5113495bSYour Name 		/* Alloc new tcp node */
1154*5113495bSYour Name 		struct tcp_stream_node *new_node;
1155*5113495bSYour Name 
1156*5113495bSYour Name 		new_node = ol_txrx_vdev_alloc_tcp_node(vdev);
1157*5113495bSYour Name 		if (!new_node) {
1158*5113495bSYour Name 			qdf_spin_unlock_bh(&vdev->tcp_ack_hash.
1159*5113495bSYour Name 					  node[info->stream_id].hash_node_lock);
1160*5113495bSYour Name 			dp_alert("Malloc failed");
1161*5113495bSYour Name 			return;
1162*5113495bSYour Name 		}
1163*5113495bSYour Name 		new_node->stream_id = info->stream_id;
1164*5113495bSYour Name 		new_node->dst_ip = info->dst_ip;
1165*5113495bSYour Name 		new_node->src_ip = info->src_ip;
1166*5113495bSYour Name 		new_node->dst_port = info->dst_port;
1167*5113495bSYour Name 		new_node->src_port = info->src_port;
1168*5113495bSYour Name 		new_node->ack_number = info->ack_number;
1169*5113495bSYour Name 		new_node->head = msdu;
1170*5113495bSYour Name 		new_node->next = NULL;
1171*5113495bSYour Name 		new_node->no_of_ack_replaced = 0;
1172*5113495bSYour Name 
1173*5113495bSYour Name 		start_timer = true;
1174*5113495bSYour Name 		/* insert new_node */
1175*5113495bSYour Name 		if (!vdev->tcp_ack_hash.node[info->stream_id].head) {
1176*5113495bSYour Name 			vdev->tcp_ack_hash.node[info->stream_id].head =
1177*5113495bSYour Name 				new_node;
1178*5113495bSYour Name 			vdev->tcp_ack_hash.node[info->stream_id].
1179*5113495bSYour Name 				no_of_entries = 1;
1180*5113495bSYour Name 		} else {
1181*5113495bSYour Name 			struct tcp_stream_node *temp =
1182*5113495bSYour Name 				 vdev->tcp_ack_hash.node[info->stream_id].head;
1183*5113495bSYour Name 			while (temp->next)
1184*5113495bSYour Name 				temp = temp->next;
1185*5113495bSYour Name 
1186*5113495bSYour Name 			temp->next = new_node;
1187*5113495bSYour Name 			vdev->tcp_ack_hash.node[info->stream_id].
1188*5113495bSYour Name 				no_of_entries++;
1189*5113495bSYour Name 		}
1190*5113495bSYour Name 	}
1191*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[info->stream_id].
1192*5113495bSYour Name 			  hash_node_lock);
1193*5113495bSYour Name 
1194*5113495bSYour Name 	/* start timer */
1195*5113495bSYour Name 	if (start_timer &&
1196*5113495bSYour Name 	    (!qdf_atomic_read(&vdev->tcp_ack_hash.is_timer_running))) {
1197*5113495bSYour Name 		qdf_hrtimer_start(&vdev->tcp_ack_hash.timer,
1198*5113495bSYour Name 				  qdf_ns_to_ktime((
1199*5113495bSYour Name 						ol_cfg_get_del_ack_timer_value(
1200*5113495bSYour Name 						vdev->pdev->ctrl_pdev) *
1201*5113495bSYour Name 						1000000)),
1202*5113495bSYour Name 			__QDF_HRTIMER_MODE_REL);
1203*5113495bSYour Name 		qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 1);
1204*5113495bSYour Name 	}
1205*5113495bSYour Name 
1206*5113495bSYour Name 	/* send packets */
1207*5113495bSYour Name 	if (node_to_be_remove) {
1208*5113495bSYour Name 		int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1209*5113495bSYour Name 					pdev->cfg.request_tx_comp;
1210*5113495bSYour Name 		qdf_nbuf_t msdu_list = NULL;
1211*5113495bSYour Name 
1212*5113495bSYour Name 		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1213*5113495bSYour Name 					  node_to_be_remove->head,
1214*5113495bSYour Name 					  tx_comp_req, true);
1215*5113495bSYour Name 		if (msdu_list)
1216*5113495bSYour Name 			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
1217*5113495bSYour Name 		ol_txrx_vdev_free_tcp_node(vdev, node_to_be_remove);
1218*5113495bSYour Name 	}
1219*5113495bSYour Name }
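
/*
 * Editor's note (illustrative, not driver code): a worked del-ack example,
 * assuming ol_cfg_get_del_ack_count_value() returns 2 and that the ini
 * timer value is in milliseconds (hence the "* 1000000" ns conversion
 * above):
 *
 *	ACK #1 for a stream: no node exists, so one is allocated, the ACK
 *	    is held, and the del-ack hrtimer is armed.
 *	ACK #2 (higher ack_number): the held ACK is freed and replaced;
 *	    no_of_ack_replaced becomes 1.
 *	ACK #3 (higher ack_number): replaced again; no_of_ack_replaced
 *	    reaches the configured count of 2, so the node is unlinked and
 *	    the one surviving ACK is transmitted via ol_tx_hl_base().
 *
 * An ACK whose ack_number does not advance (e.g. a dup-ack) is instead
 * appended behind the held ACK and the pair is flushed immediately.
 */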
1220*5113495bSYour Name 
1221*5113495bSYour Name /**
1222*5113495bSYour Name  * ol_tx_hl_vdev_tcp_del_ack_timer() - delayed ack timer function
1223*5113495bSYour Name  * @timer: timer handle
1224*5113495bSYour Name  *
 * Return: QDF_HRTIMER_NORESTART (the timer is re-armed from the tx path)
1226*5113495bSYour Name  */
1227*5113495bSYour Name enum qdf_hrtimer_restart_status
1228*5113495bSYour Name ol_tx_hl_vdev_tcp_del_ack_timer(qdf_hrtimer_data_t *timer)
1229*5113495bSYour Name {
1230*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = qdf_container_of(timer,
1231*5113495bSYour Name 						       struct ol_txrx_vdev_t,
1232*5113495bSYour Name 						       tcp_ack_hash.timer);
1233*5113495bSYour Name 	enum qdf_hrtimer_restart_status ret = QDF_HRTIMER_NORESTART;
1234*5113495bSYour Name 
1235*5113495bSYour Name 	qdf_sched_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);
1236*5113495bSYour Name 	qdf_atomic_set(&vdev->tcp_ack_hash.is_timer_running, 0);
1237*5113495bSYour Name 	return ret;
1238*5113495bSYour Name }
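
/*
 * Editor's note (illustrative): the hrtimer is initialized below with
 * QDF_CONTEXT_HARDWARE, so this callback runs in hard-IRQ context and
 * only kicks the tcp_del_ack_tq bottom half; the actual flush of held
 * ACKs (presumably done by tcp_del_ack_tasklet, registered in
 * ol_txrx_vdev_init_tcp_del_ack()) then runs in softirq context, where
 * taking hash_node_lock via qdf_spin_lock_bh() is legal.
 */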
1239*5113495bSYour Name 
1240*5113495bSYour Name /**
1241*5113495bSYour Name  * ol_tx_hl_del_ack_queue_flush_all() - drop all queued packets
1242*5113495bSYour Name  * @vdev: vdev handle
1243*5113495bSYour Name  *
1244*5113495bSYour Name  * Return: none
1245*5113495bSYour Name  */
1246*5113495bSYour Name void ol_tx_hl_del_ack_queue_flush_all(struct ol_txrx_vdev_t *vdev)
1247*5113495bSYour Name {
1248*5113495bSYour Name 	int i;
1249*5113495bSYour Name 	struct tcp_stream_node *tcp_node_list;
1250*5113495bSYour Name 	struct tcp_stream_node *temp;
1251*5113495bSYour Name 
1252*5113495bSYour Name 	qdf_hrtimer_cancel(&vdev->tcp_ack_hash.timer);
1253*5113495bSYour Name 	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
1254*5113495bSYour Name 		tcp_node_list = NULL;
1255*5113495bSYour Name 		qdf_spin_lock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
1256*5113495bSYour Name 
1257*5113495bSYour Name 		if (vdev->tcp_ack_hash.node[i].no_of_entries)
1258*5113495bSYour Name 			tcp_node_list = vdev->tcp_ack_hash.node[i].head;
1259*5113495bSYour Name 
1260*5113495bSYour Name 		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
1261*5113495bSYour Name 		vdev->tcp_ack_hash.node[i].head = NULL;
1262*5113495bSYour Name 		qdf_spin_unlock_bh(&vdev->tcp_ack_hash.node[i].hash_node_lock);
1263*5113495bSYour Name 
1264*5113495bSYour Name 		/* free all packets */
1265*5113495bSYour Name 		while (tcp_node_list) {
1266*5113495bSYour Name 			temp = tcp_node_list;
1267*5113495bSYour Name 			tcp_node_list = temp->next;
1268*5113495bSYour Name 
1269*5113495bSYour Name 			qdf_nbuf_tx_free(temp->head, 1/*error*/);
1270*5113495bSYour Name 			ol_txrx_vdev_free_tcp_node(vdev, temp);
1271*5113495bSYour Name 		}
1272*5113495bSYour Name 	}
1273*5113495bSYour Name 	ol_txrx_vdev_deinit_tcp_del_ack(vdev);
1274*5113495bSYour Name }
1275*5113495bSYour Name 
1276*5113495bSYour Name /**
1277*5113495bSYour Name  * ol_txrx_vdev_init_tcp_del_ack() - initialize tcp delayed ack structure
1278*5113495bSYour Name  * @vdev: vdev handle
1279*5113495bSYour Name  *
1280*5113495bSYour Name  * Return: none
1281*5113495bSYour Name  */
1282*5113495bSYour Name void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
1283*5113495bSYour Name {
1284*5113495bSYour Name 	int i;
1285*5113495bSYour Name 
1286*5113495bSYour Name 	vdev->driver_del_ack_enabled = false;
1287*5113495bSYour Name 
1288*5113495bSYour Name 	dp_debug("vdev-id=%u, driver_del_ack_enabled=%d",
1289*5113495bSYour Name 		 vdev->vdev_id,
1290*5113495bSYour Name 		 vdev->driver_del_ack_enabled);
1291*5113495bSYour Name 
1292*5113495bSYour Name 	vdev->no_of_tcpack = 0;
1293*5113495bSYour Name 	vdev->no_of_tcpack_replaced = 0;
1294*5113495bSYour Name 
1295*5113495bSYour Name 	qdf_hrtimer_init(&vdev->tcp_ack_hash.timer,
1296*5113495bSYour Name 			 ol_tx_hl_vdev_tcp_del_ack_timer,
1297*5113495bSYour Name 			 __QDF_CLOCK_MONOTONIC,
1298*5113495bSYour Name 			 __QDF_HRTIMER_MODE_REL,
1299*5113495bSYour Name 			 QDF_CONTEXT_HARDWARE
1300*5113495bSYour Name 			 );
1301*5113495bSYour Name 	qdf_create_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq,
1302*5113495bSYour Name 		      tcp_del_ack_tasklet,
1303*5113495bSYour Name 		      vdev);
1304*5113495bSYour Name 	qdf_atomic_init(&vdev->tcp_ack_hash.is_timer_running);
1305*5113495bSYour Name 	qdf_atomic_init(&vdev->tcp_ack_hash.tcp_node_in_use_count);
1306*5113495bSYour Name 	qdf_spinlock_create(&vdev->tcp_ack_hash.tcp_free_list_lock);
1307*5113495bSYour Name 	vdev->tcp_ack_hash.tcp_free_list = NULL;
1308*5113495bSYour Name 	for (i = 0; i < OL_TX_HL_DEL_ACK_HASH_SIZE; i++) {
1309*5113495bSYour Name 		qdf_spinlock_create(&vdev->tcp_ack_hash.node[i].hash_node_lock);
1310*5113495bSYour Name 		vdev->tcp_ack_hash.node[i].no_of_entries = 0;
1311*5113495bSYour Name 		vdev->tcp_ack_hash.node[i].head = NULL;
1312*5113495bSYour Name 	}
1313*5113495bSYour Name }
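
/*
 * Editor's sketch (hypothetical caller, for illustration only): based on
 * the functions in this file, the expected lifecycle is
 *
 *	ol_txrx_vdev_init_tcp_del_ack(vdev);      at vdev attach
 *	vdev->driver_del_ack_enabled = true;      runtime enable
 *	...
 *	ol_tx_hl_del_ack_queue_flush_all(vdev);   at vdev detach; this
 *	    also calls ol_txrx_vdev_deinit_tcp_del_ack()
 */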
1314*5113495bSYour Name 
1315*5113495bSYour Name /**
1316*5113495bSYour Name  * ol_txrx_vdev_deinit_tcp_del_ack() - deinitialize tcp delayed ack structure
1317*5113495bSYour Name  * @vdev: vdev handle
1318*5113495bSYour Name  *
1319*5113495bSYour Name  * Return: none
1320*5113495bSYour Name  */
1321*5113495bSYour Name void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
1322*5113495bSYour Name {
1323*5113495bSYour Name 	struct tcp_stream_node *temp;
1324*5113495bSYour Name 
1325*5113495bSYour Name 	qdf_destroy_bh(&vdev->tcp_ack_hash.tcp_del_ack_tq);
1326*5113495bSYour Name 
1327*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1328*5113495bSYour Name 	while (vdev->tcp_ack_hash.tcp_free_list) {
1329*5113495bSYour Name 		temp = vdev->tcp_ack_hash.tcp_free_list;
1330*5113495bSYour Name 		vdev->tcp_ack_hash.tcp_free_list = temp->next;
1331*5113495bSYour Name 		qdf_mem_free(temp);
1332*5113495bSYour Name 	}
1333*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1334*5113495bSYour Name }
1335*5113495bSYour Name 
1336*5113495bSYour Name /**
 * ol_txrx_vdev_free_tcp_node() - return a tcp stream node to the free list
1338*5113495bSYour Name  * @vdev: vdev handle
1339*5113495bSYour Name  * @node: tcp stream node
1340*5113495bSYour Name  *
1341*5113495bSYour Name  * Return: none
1342*5113495bSYour Name  */
1343*5113495bSYour Name void ol_txrx_vdev_free_tcp_node(struct ol_txrx_vdev_t *vdev,
1344*5113495bSYour Name 				struct tcp_stream_node *node)
1345*5113495bSYour Name {
1346*5113495bSYour Name 	qdf_atomic_dec(&vdev->tcp_ack_hash.tcp_node_in_use_count);
1347*5113495bSYour Name 
1348*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
	/* push onto the LIFO free list; next is NULL if the list was empty */
	node->next = vdev->tcp_ack_hash.tcp_free_list;
	vdev->tcp_ack_hash.tcp_free_list = node;
1356*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1357*5113495bSYour Name }
1358*5113495bSYour Name 
1359*5113495bSYour Name /**
1360*5113495bSYour Name  * ol_txrx_vdev_alloc_tcp_node() - allocate tcp node
1361*5113495bSYour Name  * @vdev: vdev handle
1362*5113495bSYour Name  *
1363*5113495bSYour Name  * Return: tcp stream node
1364*5113495bSYour Name  */
1365*5113495bSYour Name struct tcp_stream_node *ol_txrx_vdev_alloc_tcp_node(struct ol_txrx_vdev_t *vdev)
1366*5113495bSYour Name {
1367*5113495bSYour Name 	struct tcp_stream_node *node = NULL;
1368*5113495bSYour Name 
1369*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1370*5113495bSYour Name 	if (vdev->tcp_ack_hash.tcp_free_list) {
1371*5113495bSYour Name 		node = vdev->tcp_ack_hash.tcp_free_list;
1372*5113495bSYour Name 		vdev->tcp_ack_hash.tcp_free_list = node->next;
1373*5113495bSYour Name 	}
1374*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->tcp_ack_hash.tcp_free_list_lock);
1375*5113495bSYour Name 
1376*5113495bSYour Name 	if (!node) {
		node = qdf_mem_malloc(sizeof(struct tcp_stream_node));
1378*5113495bSYour Name 		if (!node)
1379*5113495bSYour Name 			return NULL;
1380*5113495bSYour Name 	}
1381*5113495bSYour Name 	qdf_atomic_inc(&vdev->tcp_ack_hash.tcp_node_in_use_count);
1382*5113495bSYour Name 	return node;
1383*5113495bSYour Name }
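
/*
 * Editor's sketch (illustrative): the free list is a LIFO cache of
 * tcp_stream_node entries, so a typical pairing is
 *
 *	struct tcp_stream_node *node;
 *
 *	node = ol_txrx_vdev_alloc_tcp_node(vdev);   pops the free list,
 *	    mallocs on a miss
 *	if (node) {
 *		... use node ...
 *		ol_txrx_vdev_free_tcp_node(vdev, node);   pushes it back
 *	}
 *
 * tcp_node_in_use_count tracks outstanding nodes; it is what ol_tx_hl()
 * below checks to decide whether held ACKs must be flushed once del-ack
 * is disabled.
 */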
1384*5113495bSYour Name 
1385*5113495bSYour Name qdf_nbuf_t
1386*5113495bSYour Name ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
1387*5113495bSYour Name {
1388*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1389*5113495bSYour Name 	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1390*5113495bSYour Name 				pdev->cfg.request_tx_comp;
1391*5113495bSYour Name 	struct packet_info pkt_info;
1392*5113495bSYour Name 	qdf_nbuf_t temp;
1393*5113495bSYour Name 
1394*5113495bSYour Name 	if (ol_tx_is_tcp_ack(msdu_list))
1395*5113495bSYour Name 		vdev->no_of_tcpack++;
1396*5113495bSYour Name 
	/* bail to the normal tx path unless del-ack is enabled (ini + runtime) */
1398*5113495bSYour Name 	if (!ol_cfg_get_del_ack_enable_value(vdev->pdev->ctrl_pdev) ||
1399*5113495bSYour Name 	    (!vdev->driver_del_ack_enabled)) {
1400*5113495bSYour Name 		if (qdf_atomic_read(&vdev->tcp_ack_hash.tcp_node_in_use_count))
1401*5113495bSYour Name 			ol_tx_hl_send_all_tcp_ack(vdev);
1402*5113495bSYour Name 
1403*5113495bSYour Name 		return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1404*5113495bSYour Name 				    tx_comp_req, true);
1405*5113495bSYour Name 	}
1406*5113495bSYour Name 
1407*5113495bSYour Name 	ol_tx_get_packet_info(msdu_list, &pkt_info);
1408*5113495bSYour Name 
1409*5113495bSYour Name 	if (pkt_info.type == TCP_PKT_NO_ACK) {
1410*5113495bSYour Name 		ol_tx_hl_find_and_send_tcp_stream(vdev, &pkt_info);
1411*5113495bSYour Name 		temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1412*5113495bSYour Name 				     tx_comp_req, true);
1413*5113495bSYour Name 		return temp;
1414*5113495bSYour Name 	}
1415*5113495bSYour Name 
1416*5113495bSYour Name 	if (pkt_info.type == TCP_PKT_ACK) {
1417*5113495bSYour Name 		ol_tx_hl_find_and_replace_tcp_ack(vdev, msdu_list, &pkt_info);
1418*5113495bSYour Name 		return NULL;
1419*5113495bSYour Name 	}
1420*5113495bSYour Name 
1421*5113495bSYour Name 	temp = ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1422*5113495bSYour Name 			     tx_comp_req, true);
1423*5113495bSYour Name 	return temp;
1424*5113495bSYour Name }
1425*5113495bSYour Name #else
1426*5113495bSYour Name 
1427*5113495bSYour Name #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
1428*5113495bSYour Name void
1429*5113495bSYour Name ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1430*5113495bSYour Name {
1431*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1432*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1433*5113495bSYour Name 								    pdev_id);
1434*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
1435*5113495bSYour Name 
1436*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1437*5113495bSYour Name 		vdev->bundling_required = false;
1438*5113495bSYour Name 		ol_txrx_info("vdev_id %d bundle_require %d",
1439*5113495bSYour Name 			     vdev->vdev_id, vdev->bundling_required);
1440*5113495bSYour Name 	}
1441*5113495bSYour Name }
1442*5113495bSYour Name 
1443*5113495bSYour Name void
1444*5113495bSYour Name ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
1445*5113495bSYour Name 			      uint32_t time_in_ms, uint32_t high_th,
1446*5113495bSYour Name 			      uint32_t low_th)
1447*5113495bSYour Name {
1448*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)
1449*5113495bSYour Name 				ol_txrx_get_vdev_from_vdev_id(vdev_id);
1450*5113495bSYour Name 	bool old_bundle_required;
1451*5113495bSYour Name 
1452*5113495bSYour Name 	if ((!vdev) || (low_th > high_th))
1453*5113495bSYour Name 		return;
1454*5113495bSYour Name 
1455*5113495bSYour Name 	old_bundle_required = vdev->bundling_required;
1456*5113495bSYour Name 	if (tx_bytes > ((high_th * time_in_ms * 1500) / 1000))
1457*5113495bSYour Name 		vdev->bundling_required = true;
1458*5113495bSYour Name 	else if (tx_bytes < ((low_th * time_in_ms * 1500) / 1000))
1459*5113495bSYour Name 		vdev->bundling_required = false;
1460*5113495bSYour Name 
1461*5113495bSYour Name 	if (old_bundle_required != vdev->bundling_required)
1462*5113495bSYour Name 		ol_txrx_info("vdev_id %d bundle_require %d tx_bytes %ld time_in_ms %d high_th %d low_th %d",
1463*5113495bSYour Name 			     vdev->vdev_id, vdev->bundling_required, tx_bytes,
1464*5113495bSYour Name 			     time_in_ms, high_th, low_th);
1465*5113495bSYour Name }
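
/*
 * Editor's note (illustrative arithmetic): the thresholds appear to be in
 * packets per second, with 1500 standing in for a full-sized packet and
 * the /1000 converting time_in_ms to seconds:
 *
 *	threshold_bytes = th [pkts/s] * (time_in_ms / 1000) [s] * 1500 [B]
 *
 * e.g. with high_th = 100 and time_in_ms = 100,
 *
 *	100 * 100 * 1500 / 1000 = 15000 bytes
 *
 * so bundling turns on once more than ~15 KB (about ten full-sized
 * packets) were sent in the 100 ms window, and turns off again only when
 * the rate drops below the corresponding low_th figure.
 */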
1466*5113495bSYour Name 
1467*5113495bSYour Name /**
1468*5113495bSYour Name  * ol_tx_hl_queue_flush_all() - drop all packets in vdev bundle queue
1469*5113495bSYour Name  * @vdev: vdev handle
1470*5113495bSYour Name  *
1471*5113495bSYour Name  * Return: none
1472*5113495bSYour Name  */
1473*5113495bSYour Name void
1474*5113495bSYour Name ol_tx_hl_queue_flush_all(struct ol_txrx_vdev_t *vdev)
1475*5113495bSYour Name {
1476*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
1477*5113495bSYour Name 	if (vdev->bundle_queue.txq.depth != 0) {
1478*5113495bSYour Name 		qdf_timer_stop(&vdev->bundle_queue.timer);
1479*5113495bSYour Name 		vdev->pdev->total_bundle_queue_length -=
1480*5113495bSYour Name 				vdev->bundle_queue.txq.depth;
1481*5113495bSYour Name 		qdf_nbuf_tx_free(vdev->bundle_queue.txq.head, 1/*error*/);
1482*5113495bSYour Name 		vdev->bundle_queue.txq.depth = 0;
1483*5113495bSYour Name 		vdev->bundle_queue.txq.head = NULL;
1484*5113495bSYour Name 		vdev->bundle_queue.txq.tail = NULL;
1485*5113495bSYour Name 	}
1486*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1487*5113495bSYour Name }
1488*5113495bSYour Name 
1489*5113495bSYour Name /**
 * ol_tx_hl_vdev_queue_append() - append packets to the vdev bundle tx queue
1491*5113495bSYour Name  * @vdev: vdev handle
1492*5113495bSYour Name  * @msdu_list: msdu list
1493*5113495bSYour Name  *
1494*5113495bSYour Name  * Return: none
1495*5113495bSYour Name  */
1496*5113495bSYour Name static void
1497*5113495bSYour Name ol_tx_hl_vdev_queue_append(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu_list)
1498*5113495bSYour Name {
1499*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
1500*5113495bSYour Name 
1501*5113495bSYour Name 	if (!vdev->bundle_queue.txq.head) {
1502*5113495bSYour Name 		qdf_timer_start(
1503*5113495bSYour Name 			&vdev->bundle_queue.timer,
1504*5113495bSYour Name 			ol_cfg_get_bundle_timer_value(vdev->pdev->ctrl_pdev));
1505*5113495bSYour Name 		vdev->bundle_queue.txq.head = msdu_list;
1506*5113495bSYour Name 		vdev->bundle_queue.txq.tail = msdu_list;
1507*5113495bSYour Name 	} else {
1508*5113495bSYour Name 		qdf_nbuf_set_next(vdev->bundle_queue.txq.tail, msdu_list);
1509*5113495bSYour Name 	}
1510*5113495bSYour Name 
1511*5113495bSYour Name 	while (qdf_nbuf_next(msdu_list)) {
1512*5113495bSYour Name 		vdev->bundle_queue.txq.depth++;
1513*5113495bSYour Name 		vdev->pdev->total_bundle_queue_length++;
1514*5113495bSYour Name 		msdu_list = qdf_nbuf_next(msdu_list);
1515*5113495bSYour Name 	}
1516*5113495bSYour Name 
1517*5113495bSYour Name 	vdev->bundle_queue.txq.depth++;
1518*5113495bSYour Name 	vdev->pdev->total_bundle_queue_length++;
1519*5113495bSYour Name 	vdev->bundle_queue.txq.tail = msdu_list;
1520*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1521*5113495bSYour Name }
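
/*
 * Editor's note (illustrative): msdu_list may be a chain of nbufs, so the
 * while loop above walks to the tail of the chain while counting it; e.g.
 * appending a 3-packet chain to an empty queue arms the bundle timer once
 * and leaves txq.depth == 3, with txq.tail pointing at the chain's last
 * nbuf.
 */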
1522*5113495bSYour Name 
1523*5113495bSYour Name /**
1524*5113495bSYour Name  * ol_tx_hl_vdev_queue_send_all() - send all packets in vdev bundle queue
1525*5113495bSYour Name  * @vdev: vdev handle
 * @call_sched: invoke the tx scheduler after sending
 * @in_timer_context: true when invoked from the bundle timer callback
 *
 * Return: NULL if all packets were accepted, otherwise the list of
 *	   rejected packets for the caller to free
1529*5113495bSYour Name  */
1530*5113495bSYour Name static qdf_nbuf_t
1531*5113495bSYour Name ol_tx_hl_vdev_queue_send_all(struct ol_txrx_vdev_t *vdev, bool call_sched,
1532*5113495bSYour Name 			     bool in_timer_context)
1533*5113495bSYour Name {
1534*5113495bSYour Name 	qdf_nbuf_t msdu_list = NULL;
1535*5113495bSYour Name 	qdf_nbuf_t skb_list_head, skb_list_tail;
1536*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1537*5113495bSYour Name 	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1538*5113495bSYour Name 				pdev->cfg.request_tx_comp;
1539*5113495bSYour Name 	int pkt_to_sent;
1540*5113495bSYour Name 
1541*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->bundle_queue.mutex);
1542*5113495bSYour Name 
1543*5113495bSYour Name 	if (!vdev->bundle_queue.txq.depth) {
1544*5113495bSYour Name 		qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1545*5113495bSYour Name 		return msdu_list;
1546*5113495bSYour Name 	}
1547*5113495bSYour Name 
1548*5113495bSYour Name 	if (likely((qdf_atomic_read(&vdev->tx_desc_count) +
1549*5113495bSYour Name 		    vdev->bundle_queue.txq.depth) <
1550*5113495bSYour Name 		    vdev->queue_stop_th)) {
1551*5113495bSYour Name 		qdf_timer_stop(&vdev->bundle_queue.timer);
1552*5113495bSYour Name 		vdev->pdev->total_bundle_queue_length -=
1553*5113495bSYour Name 				vdev->bundle_queue.txq.depth;
1554*5113495bSYour Name 		msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1555*5113495bSYour Name 					  vdev->bundle_queue.txq.head,
1556*5113495bSYour Name 					  tx_comp_req, call_sched);
1557*5113495bSYour Name 		vdev->bundle_queue.txq.depth = 0;
1558*5113495bSYour Name 		vdev->bundle_queue.txq.head = NULL;
1559*5113495bSYour Name 		vdev->bundle_queue.txq.tail = NULL;
1560*5113495bSYour Name 	} else {
1561*5113495bSYour Name 		pkt_to_sent = vdev->queue_stop_th -
1562*5113495bSYour Name 			qdf_atomic_read(&vdev->tx_desc_count);
1563*5113495bSYour Name 
		/* guard against tx_desc_count already at/over the threshold */
		if (pkt_to_sent > 0) {
1565*5113495bSYour Name 			skb_list_head = vdev->bundle_queue.txq.head;
1566*5113495bSYour Name 
1567*5113495bSYour Name 			while (pkt_to_sent) {
1568*5113495bSYour Name 				skb_list_tail =
1569*5113495bSYour Name 					vdev->bundle_queue.txq.head;
1570*5113495bSYour Name 				vdev->bundle_queue.txq.head =
1571*5113495bSYour Name 				    qdf_nbuf_next(vdev->bundle_queue.txq.head);
1572*5113495bSYour Name 				vdev->pdev->total_bundle_queue_length--;
1573*5113495bSYour Name 				vdev->bundle_queue.txq.depth--;
1574*5113495bSYour Name 				pkt_to_sent--;
1575*5113495bSYour Name 				if (!vdev->bundle_queue.txq.head) {
1576*5113495bSYour Name 					qdf_timer_stop(
1577*5113495bSYour Name 						&vdev->bundle_queue.timer);
1578*5113495bSYour Name 					break;
1579*5113495bSYour Name 				}
1580*5113495bSYour Name 			}
1581*5113495bSYour Name 
1582*5113495bSYour Name 			qdf_nbuf_set_next(skb_list_tail, NULL);
1583*5113495bSYour Name 			msdu_list = ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1584*5113495bSYour Name 						  skb_list_head, tx_comp_req,
1585*5113495bSYour Name 						  call_sched);
1586*5113495bSYour Name 		}
1587*5113495bSYour Name 
		if (in_timer_context && vdev->bundle_queue.txq.head) {
1589*5113495bSYour Name 			qdf_timer_start(
1590*5113495bSYour Name 				&vdev->bundle_queue.timer,
1591*5113495bSYour Name 				ol_cfg_get_bundle_timer_value(
1592*5113495bSYour Name 					vdev->pdev->ctrl_pdev));
1593*5113495bSYour Name 		}
1594*5113495bSYour Name 	}
1595*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->bundle_queue.mutex);
1596*5113495bSYour Name 
1597*5113495bSYour Name 	return msdu_list;
1598*5113495bSYour Name }
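
/*
 * Editor's worked example: with queue_stop_th = 100, tx_desc_count = 95
 * and txq.depth = 10, the likely() fast path is skipped (95 + 10 >= 100)
 * and pkt_to_sent = 5, so only the first five queued packets are handed
 * to ol_tx_hl_base(); the other five stay queued and, in timer context,
 * the bundle timer is re-armed to retry them later.
 */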
1599*5113495bSYour Name 
1600*5113495bSYour Name /**
 * ol_tx_hl_pdev_queue_send_all() - send all packets from all vdev bundle queues
1602*5113495bSYour Name  * @pdev: pdev handle
1603*5113495bSYour Name  *
1604*5113495bSYour Name  * Return: NULL for success
1605*5113495bSYour Name  */
1606*5113495bSYour Name qdf_nbuf_t
1607*5113495bSYour Name ol_tx_hl_pdev_queue_send_all(struct ol_txrx_pdev_t *pdev)
1608*5113495bSYour Name {
1609*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
1610*5113495bSYour Name 	qdf_nbuf_t msdu_list;
1611*5113495bSYour Name 
1612*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1613*5113495bSYour Name 		msdu_list = ol_tx_hl_vdev_queue_send_all(vdev, false, false);
1614*5113495bSYour Name 		if (msdu_list)
1615*5113495bSYour Name 			qdf_nbuf_tx_free(msdu_list, 1/*error*/);
1616*5113495bSYour Name 	}
1617*5113495bSYour Name 	ol_tx_sched(pdev);
1618*5113495bSYour Name 	return NULL; /* all msdus were accepted */
1619*5113495bSYour Name }
1620*5113495bSYour Name 
1621*5113495bSYour Name /**
1622*5113495bSYour Name  * ol_tx_hl_vdev_bundle_timer() - bundle timer function
 * @ctx: vdev handle, passed as the opaque timer context
1624*5113495bSYour Name  *
1625*5113495bSYour Name  * Return: none
1626*5113495bSYour Name  */
1627*5113495bSYour Name void
1628*5113495bSYour Name ol_tx_hl_vdev_bundle_timer(void *ctx)
1629*5113495bSYour Name {
1630*5113495bSYour Name 	qdf_nbuf_t msdu_list;
1631*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)ctx;
1632*5113495bSYour Name 
1633*5113495bSYour Name 	vdev->no_of_bundle_sent_in_timer++;
1634*5113495bSYour Name 	msdu_list = ol_tx_hl_vdev_queue_send_all(vdev, true, true);
1635*5113495bSYour Name 	if (msdu_list)
1636*5113495bSYour Name 		qdf_nbuf_tx_free(msdu_list, 1/*error*/);
1637*5113495bSYour Name }
1638*5113495bSYour Name 
1639*5113495bSYour Name qdf_nbuf_t
1640*5113495bSYour Name ol_tx_hl(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu_list)
1641*5113495bSYour Name {
1642*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1643*5113495bSYour Name 	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1644*5113495bSYour Name 				pdev->cfg.request_tx_comp;
1645*5113495bSYour Name 
1646*5113495bSYour Name 	/* No queuing for high priority packets */
1647*5113495bSYour Name 	if (ol_tx_desc_is_high_prio(msdu_list)) {
1648*5113495bSYour Name 		vdev->no_of_pkt_not_added_in_queue++;
1649*5113495bSYour Name 		return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
				     tx_comp_req, true);
1651*5113495bSYour Name 	} else if (vdev->bundling_required &&
1652*5113495bSYour Name 	    (ol_cfg_get_bundle_size(vdev->pdev->ctrl_pdev) > 1)) {
1653*5113495bSYour Name 		ol_tx_hl_vdev_queue_append(vdev, msdu_list);
1654*5113495bSYour Name 
1655*5113495bSYour Name 		if (pdev->total_bundle_queue_length >=
1656*5113495bSYour Name 		    ol_cfg_get_bundle_size(vdev->pdev->ctrl_pdev)) {
1657*5113495bSYour Name 			vdev->no_of_bundle_sent_after_threshold++;
1658*5113495bSYour Name 			return ol_tx_hl_pdev_queue_send_all(pdev);
1659*5113495bSYour Name 		}
1660*5113495bSYour Name 	} else {
1661*5113495bSYour Name 		if (vdev->bundle_queue.txq.depth != 0) {
1662*5113495bSYour Name 			ol_tx_hl_vdev_queue_append(vdev, msdu_list);
1663*5113495bSYour Name 			return ol_tx_hl_vdev_queue_send_all(vdev, true, false);
1664*5113495bSYour Name 		} else {
1665*5113495bSYour Name 			vdev->no_of_pkt_not_added_in_queue++;
1666*5113495bSYour Name 			return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list,
1667*5113495bSYour Name 					     tx_comp_req, true);
1668*5113495bSYour Name 		}
1669*5113495bSYour Name 	}
1670*5113495bSYour Name 
1671*5113495bSYour Name 	return NULL; /* all msdus were accepted */
1672*5113495bSYour Name }
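
/*
 * Editor's summary (illustrative) of the dispatch above:
 *
 *	high priority          ->  sent immediately, never queued
 *	bundling required and  ->  queued; every vdev queue is flushed once
 *	    bundle size > 1        the pdev-wide depth reaches the
 *	                           configured bundle size
 *	otherwise              ->  any leftover queue is drained first
 *	                           (presumably to preserve ordering), else
 *	                           the packet is sent directly
 */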
1673*5113495bSYour Name 
1674*5113495bSYour Name #else
1675*5113495bSYour Name 
1676*5113495bSYour Name qdf_nbuf_t
1677*5113495bSYour Name ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
1678*5113495bSYour Name {
1679*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1680*5113495bSYour Name 	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1681*5113495bSYour Name 				pdev->cfg.request_tx_comp;
1682*5113495bSYour Name 
1683*5113495bSYour Name 	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD,
1684*5113495bSYour Name 			     msdu_list, tx_comp_req, true);
1685*5113495bSYour Name }
1686*5113495bSYour Name #endif
1687*5113495bSYour Name #endif
1688*5113495bSYour Name 
1689*5113495bSYour Name qdf_nbuf_t ol_tx_non_std_hl(struct ol_txrx_vdev_t *vdev,
1690*5113495bSYour Name 			    enum ol_tx_spec tx_spec,
1691*5113495bSYour Name 			    qdf_nbuf_t msdu_list)
1692*5113495bSYour Name {
1693*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1694*5113495bSYour Name 	int tx_comp_req = pdev->cfg.default_tx_comp_req ||
1695*5113495bSYour Name 				pdev->cfg.request_tx_comp;
1696*5113495bSYour Name 
1697*5113495bSYour Name 	if (!tx_comp_req) {
1698*5113495bSYour Name 		if ((tx_spec == OL_TX_SPEC_NO_FREE) &&
1699*5113495bSYour Name 		    (pdev->tx_data_callback.func))
1700*5113495bSYour Name 			tx_comp_req = 1;
1701*5113495bSYour Name 	}
1702*5113495bSYour Name 	return ol_tx_hl_base(vdev, tx_spec, msdu_list, tx_comp_req, true);
1703*5113495bSYour Name }
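
/*
 * Editor's note (illustrative): OL_TX_SPEC_NO_FREE frames are presumably
 * handed back through the registered tx_data_callback rather than freed
 * by the driver, which is why a tx completion is forced whenever that
 * callback is present; a hypothetical caller:
 *
 *	ol_tx_non_std_hl(vdev, OL_TX_SPEC_NO_FREE, msdu_list);
 */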
1704*5113495bSYour Name 
1705*5113495bSYour Name #ifdef FEATURE_WLAN_TDLS
1706*5113495bSYour Name void ol_txrx_copy_mac_addr_raw(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1707*5113495bSYour Name 			       uint8_t *bss_addr)
1708*5113495bSYour Name {
1709*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1710*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1711*5113495bSYour Name 								     vdev_id);
1712*5113495bSYour Name 
1713*5113495bSYour Name 	if (!vdev)
1714*5113495bSYour Name 		return;
1715*5113495bSYour Name 
1716*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
1717*5113495bSYour Name 	if (bss_addr && vdev->last_real_peer &&
1718*5113495bSYour Name 	    !qdf_mem_cmp((u8 *)bss_addr,
1719*5113495bSYour Name 			     vdev->last_real_peer->mac_addr.raw,
1720*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE))
1721*5113495bSYour Name 		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
1722*5113495bSYour Name 			     vdev->last_real_peer->mac_addr.raw,
1723*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE);
1724*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
1725*5113495bSYour Name }
1726*5113495bSYour Name 
1727*5113495bSYour Name void
1728*5113495bSYour Name ol_txrx_add_last_real_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1729*5113495bSYour Name 			   uint8_t vdev_id)
1730*5113495bSYour Name {
1731*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1732*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1733*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1734*5113495bSYour Name 								     vdev_id);
1735*5113495bSYour Name 	ol_txrx_peer_handle peer;
1736*5113495bSYour Name 
1737*5113495bSYour Name 	if (!pdev || !vdev)
1738*5113495bSYour Name 		return;
1739*5113495bSYour Name 
1740*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr(
1741*5113495bSYour Name 		(struct cdp_pdev *)pdev,
1742*5113495bSYour Name 		vdev->hl_tdls_ap_mac_addr.raw);
1743*5113495bSYour Name 
1744*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
1745*5113495bSYour Name 	if (!vdev->last_real_peer && peer &&
1746*5113495bSYour Name 	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID)) {
1747*5113495bSYour Name 		vdev->last_real_peer = peer;
1748*5113495bSYour Name 		qdf_mem_zero(vdev->hl_tdls_ap_mac_addr.raw,
1749*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE);
1750*5113495bSYour Name 	}
1751*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
1752*5113495bSYour Name }
1753*5113495bSYour Name 
1754*5113495bSYour Name bool is_vdev_restore_last_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1755*5113495bSYour Name 			       uint8_t *peer_mac)
1756*5113495bSYour Name {
1757*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
1758*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
1759*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1760*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1761*5113495bSYour Name 								     vdev_id);
1762*5113495bSYour Name 
1763*5113495bSYour Name 	if (!vdev)
1764*5113495bSYour Name 		return false;
1765*5113495bSYour Name 
1766*5113495bSYour Name 	pdev = vdev->pdev;
1767*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev, peer_mac);
1768*5113495bSYour Name 
1769*5113495bSYour Name 	return vdev->last_real_peer && (vdev->last_real_peer == peer);
1770*5113495bSYour Name }
1771*5113495bSYour Name 
1772*5113495bSYour Name void ol_txrx_update_last_real_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1773*5113495bSYour Name 				   uint8_t vdev_id, bool restore_last_peer)
1774*5113495bSYour Name {
1775*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1776*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1777*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1778*5113495bSYour Name 								     vdev_id);
1779*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
1780*5113495bSYour Name 
1781*5113495bSYour Name 	if (!restore_last_peer || !pdev || !vdev)
1782*5113495bSYour Name 		return;
1783*5113495bSYour Name 
1784*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
1785*5113495bSYour Name 					 vdev->hl_tdls_ap_mac_addr.raw);
1786*5113495bSYour Name 
1787*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
1788*5113495bSYour Name 	if (!vdev->last_real_peer && peer &&
1789*5113495bSYour Name 	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID)) {
1790*5113495bSYour Name 		vdev->last_real_peer = peer;
1791*5113495bSYour Name 		qdf_mem_zero(vdev->hl_tdls_ap_mac_addr.raw,
1792*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE);
1793*5113495bSYour Name 	}
1794*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
1795*5113495bSYour Name }
1796*5113495bSYour Name 
1797*5113495bSYour Name void ol_txrx_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1798*5113495bSYour Name 				   uint8_t *peer_mac, bool val)
1799*5113495bSYour Name {
1800*5113495bSYour Name 	ol_txrx_peer_handle peer;
1801*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
1802*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1803*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1804*5113495bSYour Name 								     vdev_id);
1805*5113495bSYour Name 
1806*5113495bSYour Name 	if (!vdev)
1807*5113495bSYour Name 		return;
1808*5113495bSYour Name 
1809*5113495bSYour Name 	pdev = vdev->pdev;
1810*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev, peer_mac);
1811*5113495bSYour Name 
	if (!peer) {
		ol_txrx_err("peer is NULL");
		return;
	}

	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
			  peer, qdf_atomic_read(&peer->ref_cnt));

	/* Mark peer as tdls */
	peer->is_tdls_peer = val;
1818*5113495bSYour Name }
1819*5113495bSYour Name 
1820*5113495bSYour Name void ol_txrx_set_tdls_offchan_enabled(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1821*5113495bSYour Name 				      uint8_t *peer_mac, bool val)
1822*5113495bSYour Name {
1823*5113495bSYour Name 	ol_txrx_peer_handle peer;
1824*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
1825*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1826*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
1827*5113495bSYour Name 								     vdev_id);
1828*5113495bSYour Name 
1829*5113495bSYour Name 	if (!vdev)
1830*5113495bSYour Name 		return;
1831*5113495bSYour Name 
1832*5113495bSYour Name 	pdev = vdev->pdev;
1833*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev, peer_mac);
1834*5113495bSYour Name 
	if (!peer) {
		ol_txrx_err("peer is NULL");
		return;
	}

	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
			  peer, qdf_atomic_read(&peer->ref_cnt));

	/* Set TDLS Offchan operation enable/disable */
	if (peer->is_tdls_peer)
		peer->tdls_offchan_enabled = val;
1841*5113495bSYour Name }
1842*5113495bSYour Name #endif
1843*5113495bSYour Name 
1844*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
1845*5113495bSYour Name /**
1846*5113495bSYour Name  * ol_txrx_pdev_txq_log_init() - initialise pdev txq logs
1847*5113495bSYour Name  * @pdev: the physical device object
1848*5113495bSYour Name  *
1849*5113495bSYour Name  * Return: None
1850*5113495bSYour Name  */
1851*5113495bSYour Name void ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
1852*5113495bSYour Name {
1853*5113495bSYour Name 	qdf_spinlock_create(&pdev->txq_log_spinlock);
1854*5113495bSYour Name 	pdev->txq_log.size = OL_TXQ_LOG_SIZE;
1855*5113495bSYour Name 	pdev->txq_log.oldest_record_offset = 0;
1856*5113495bSYour Name 	pdev->txq_log.offset = 0;
1857*5113495bSYour Name 	pdev->txq_log.allow_wrap = 1;
1858*5113495bSYour Name 	pdev->txq_log.wrapped = 0;
1859*5113495bSYour Name }
1860*5113495bSYour Name 
1861*5113495bSYour Name /**
1862*5113495bSYour Name  * ol_txrx_pdev_txq_log_destroy() - remove txq log spinlock for pdev
1863*5113495bSYour Name  * @pdev: the physical device object
1864*5113495bSYour Name  *
1865*5113495bSYour Name  * Return: None
1866*5113495bSYour Name  */
1867*5113495bSYour Name void ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
1868*5113495bSYour Name {
1869*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->txq_log_spinlock);
1870*5113495bSYour Name }
1871*5113495bSYour Name #endif
1872*5113495bSYour Name 
1873*5113495bSYour Name #if defined(DEBUG_HL_LOGGING)
1874*5113495bSYour Name 
1875*5113495bSYour Name /**
1876*5113495bSYour Name  * ol_txrx_pdev_grp_stats_init() - initialise group stat spinlock for pdev
1877*5113495bSYour Name  * @pdev: the physical device object
1878*5113495bSYour Name  *
1879*5113495bSYour Name  * Return: None
1880*5113495bSYour Name  */
1881*5113495bSYour Name void ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
1882*5113495bSYour Name {
1883*5113495bSYour Name 	qdf_spinlock_create(&pdev->grp_stat_spinlock);
1884*5113495bSYour Name 	pdev->grp_stats.last_valid_index = -1;
1885*5113495bSYour Name 	pdev->grp_stats.wrap_around = 0;
1886*5113495bSYour Name }
1887*5113495bSYour Name 
1888*5113495bSYour Name /**
1889*5113495bSYour Name  * ol_txrx_pdev_grp_stat_destroy() - destroy group stat spinlock for pdev
1890*5113495bSYour Name  * @pdev: the physical device object
1891*5113495bSYour Name  *
1892*5113495bSYour Name  * Return: None
1893*5113495bSYour Name  */
1894*5113495bSYour Name void ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
1895*5113495bSYour Name {
1896*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->grp_stat_spinlock);
1897*5113495bSYour Name }
1898*5113495bSYour Name #endif
1899*5113495bSYour Name 
1900*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
1901*5113495bSYour Name 
1902*5113495bSYour Name /**
1903*5113495bSYour Name  * ol_txrx_hl_tdls_flag_reset() - reset tdls flag for vdev
1904*5113495bSYour Name  * @soc_hdl: Datapath soc handle
1905*5113495bSYour Name  * @vdev_id: id of vdev
1906*5113495bSYour Name  * @flag: flag
1907*5113495bSYour Name  *
1908*5113495bSYour Name  * Return: None
1909*5113495bSYour Name  */
1910*5113495bSYour Name void
1911*5113495bSYour Name ol_txrx_hl_tdls_flag_reset(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1912*5113495bSYour Name 			   bool flag)
1913*5113495bSYour Name {
1914*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
1915*5113495bSYour Name 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
1916*5113495bSYour Name 	if (!vdev) {
1917*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1918*5113495bSYour Name 			  "%s: Invalid vdev_id %d", __func__, vdev_id);
1919*5113495bSYour Name 		return;
1920*5113495bSYour Name 	}
1921*5113495bSYour Name 
1922*5113495bSYour Name 	vdev->hlTdlsFlag = flag;
1923*5113495bSYour Name }
1924*5113495bSYour Name #endif
1925*5113495bSYour Name 
1926*5113495bSYour Name /**
1927*5113495bSYour Name  * ol_txrx_vdev_txqs_init() - initialise vdev tx queues
1928*5113495bSYour Name  * @vdev: the virtual device object
1929*5113495bSYour Name  *
1930*5113495bSYour Name  * Return: None
1931*5113495bSYour Name  */
1932*5113495bSYour Name void ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
1933*5113495bSYour Name {
1934*5113495bSYour Name 	uint8_t i;
1935*5113495bSYour Name 
1936*5113495bSYour Name 	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
1937*5113495bSYour Name 		TAILQ_INIT(&vdev->txqs[i].head);
1938*5113495bSYour Name 		vdev->txqs[i].paused_count.total = 0;
1939*5113495bSYour Name 		vdev->txqs[i].frms = 0;
1940*5113495bSYour Name 		vdev->txqs[i].bytes = 0;
1941*5113495bSYour Name 		vdev->txqs[i].ext_tid = OL_TX_NUM_TIDS + i;
1942*5113495bSYour Name 		vdev->txqs[i].flag = ol_tx_queue_empty;
1943*5113495bSYour Name 		/* aggregation is not applicable for vdev tx queues */
1944*5113495bSYour Name 		vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
1945*5113495bSYour Name 		ol_tx_txq_set_group_ptr(&vdev->txqs[i], NULL);
1946*5113495bSYour Name 		ol_txrx_set_txq_peer(&vdev->txqs[i], NULL);
1947*5113495bSYour Name 	}
1948*5113495bSYour Name }
1949*5113495bSYour Name 
1950*5113495bSYour Name /**
1951*5113495bSYour Name  * ol_txrx_vdev_tx_queue_free() - free vdev tx queues
1952*5113495bSYour Name  * @vdev: the virtual device object
1953*5113495bSYour Name  *
1954*5113495bSYour Name  * Return: None
1955*5113495bSYour Name  */
1956*5113495bSYour Name void ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
1957*5113495bSYour Name {
1958*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
1959*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq;
1960*5113495bSYour Name 	int i;
1961*5113495bSYour Name 
1962*5113495bSYour Name 	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
1963*5113495bSYour Name 		txq = &vdev->txqs[i];
1964*5113495bSYour Name 		ol_tx_queue_free(pdev, txq, (i + OL_TX_NUM_TIDS), false);
1965*5113495bSYour Name 	}
1966*5113495bSYour Name }
1967*5113495bSYour Name 
1968*5113495bSYour Name /**
1969*5113495bSYour Name  * ol_txrx_peer_txqs_init() - initialise peer tx queues
1970*5113495bSYour Name  * @pdev: the physical device object
1971*5113495bSYour Name  * @peer: peer object
1972*5113495bSYour Name  *
1973*5113495bSYour Name  * Return: None
1974*5113495bSYour Name  */
1975*5113495bSYour Name void ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
1976*5113495bSYour Name 			    struct ol_txrx_peer_t *peer)
1977*5113495bSYour Name {
1978*5113495bSYour Name 	uint8_t i;
1979*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = peer->vdev;
1980*5113495bSYour Name 
1981*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1982*5113495bSYour Name 	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
1983*5113495bSYour Name 		TAILQ_INIT(&peer->txqs[i].head);
1984*5113495bSYour Name 		peer->txqs[i].paused_count.total = 0;
1985*5113495bSYour Name 		peer->txqs[i].frms = 0;
1986*5113495bSYour Name 		peer->txqs[i].bytes = 0;
1987*5113495bSYour Name 		peer->txqs[i].ext_tid = i;
1988*5113495bSYour Name 		peer->txqs[i].flag = ol_tx_queue_empty;
1989*5113495bSYour Name 		peer->txqs[i].aggr_state = ol_tx_aggr_untried;
1990*5113495bSYour Name 		ol_tx_set_peer_group_ptr(pdev, peer, vdev->vdev_id, i);
1991*5113495bSYour Name 		ol_txrx_set_txq_peer(&peer->txqs[i], peer);
1992*5113495bSYour Name 	}
1993*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1994*5113495bSYour Name 
1995*5113495bSYour Name 	/* aggregation is not applicable for mgmt and non-QoS tx queues */
1996*5113495bSYour Name 	for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++)
1997*5113495bSYour Name 		peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
1998*5113495bSYour Name 
1999*5113495bSYour Name 	ol_txrx_peer_pause(peer);
2000*5113495bSYour Name }
2001*5113495bSYour Name 
2002*5113495bSYour Name /**
2003*5113495bSYour Name  * ol_txrx_peer_tx_queue_free() - free peer tx queues
2004*5113495bSYour Name  * @pdev: the physical device object
2005*5113495bSYour Name  * @peer: peer object
2006*5113495bSYour Name  *
2007*5113495bSYour Name  * Return: None
2008*5113495bSYour Name  */
2009*5113495bSYour Name void ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
2010*5113495bSYour Name 				struct ol_txrx_peer_t *peer)
2011*5113495bSYour Name {
2012*5113495bSYour Name 	struct ol_tx_frms_queue_t *txq;
2013*5113495bSYour Name 	uint8_t i;
2014*5113495bSYour Name 
2015*5113495bSYour Name 	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
2016*5113495bSYour Name 		txq = &peer->txqs[i];
2017*5113495bSYour Name 		ol_tx_queue_free(pdev, txq, i, true);
2018*5113495bSYour Name 	}
2019*5113495bSYour Name }
2020*5113495bSYour Name 
2021*5113495bSYour Name #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
2022*5113495bSYour Name 
2023*5113495bSYour Name /**
2024*5113495bSYour Name  * ol_txrx_update_group_credit() - update group credit for tx queue
 * @group: tx queue group whose credit is to be updated
 * @credit: credit delta, or the absolute credit value
 * @absolute: if set, overwrite the group credit rather than adding to it
 *
 * Return: None
2030*5113495bSYour Name  */
2031*5113495bSYour Name void ol_txrx_update_group_credit(
2032*5113495bSYour Name 		struct ol_tx_queue_group_t *group,
2033*5113495bSYour Name 		int32_t credit,
2034*5113495bSYour Name 		u_int8_t absolute)
2035*5113495bSYour Name {
2036*5113495bSYour Name 	if (absolute)
2037*5113495bSYour Name 		qdf_atomic_set(&group->credit, credit);
2038*5113495bSYour Name 	else
2039*5113495bSYour Name 		qdf_atomic_add(credit, &group->credit);
2040*5113495bSYour Name }
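
/*
 * Editor's sketch (hypothetical values): with the group currently holding
 * 10 credits,
 *
 *	ol_txrx_update_group_credit(group, 5, 0);    delta: 10 -> 15
 *	ol_txrx_update_group_credit(group, -3, 0);   delta: 15 -> 12
 *	ol_txrx_update_group_credit(group, 20, 1);   absolute: -> 20
 */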
2041*5113495bSYour Name 
2042*5113495bSYour Name /**
 * ol_txrx_update_tx_queue_groups() - update the vdev tx queue group if
 *				      the vdev id mask and ac mask do not match
 * @pdev: the data physical device
 * @group_id: TXQ group id
 * @credit: TXQ group credit count
 * @absolute: if set, overwrite the group credit rather than adding to it
 * @vdev_id_mask: TXQ vdev group id mask
 * @ac_mask: TXQ access category mask
2051*5113495bSYour Name  *
2052*5113495bSYour Name  * Return: None
2053*5113495bSYour Name  */
2054*5113495bSYour Name void ol_txrx_update_tx_queue_groups(
2055*5113495bSYour Name 		ol_txrx_pdev_handle pdev,
2056*5113495bSYour Name 		u_int8_t group_id,
2057*5113495bSYour Name 		int32_t credit,
2058*5113495bSYour Name 		u_int8_t absolute,
2059*5113495bSYour Name 		u_int32_t vdev_id_mask,
2060*5113495bSYour Name 		u_int32_t ac_mask
2061*5113495bSYour Name 		)
2062*5113495bSYour Name {
2063*5113495bSYour Name 	struct ol_tx_queue_group_t *group;
2064*5113495bSYour Name 	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
2065*5113495bSYour Name 	u_int32_t membership;
2066*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
2067*5113495bSYour Name 
2068*5113495bSYour Name 	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
2069*5113495bSYour Name 		ol_txrx_warn("invalid group_id=%u, ignore update", group_id);
2070*5113495bSYour Name 		return;
2071*5113495bSYour Name 	}
2072*5113495bSYour Name 
2073*5113495bSYour Name 	group = &pdev->txq_grps[group_id];
2074*5113495bSYour Name 
2075*5113495bSYour Name 	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
2076*5113495bSYour Name 
2077*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
2078*5113495bSYour Name 	/*
	 * If the membership (vdev id mask and ac mask)
	 * matches, there is no need to update the tx queue groups.
2081*5113495bSYour Name 	 */
2082*5113495bSYour Name 	if (group->membership == membership)
2083*5113495bSYour Name 		/* Update Credit Only */
2084*5113495bSYour Name 		goto credit_update;
2085*5113495bSYour Name 
2086*5113495bSYour Name 	credit += ol_txrx_distribute_group_credits(pdev, group_id,
2087*5113495bSYour Name 						   vdev_id_mask);
2088*5113495bSYour Name 	/*
	 * The membership (vdev id mask and ac mask) does not match.
	 * TODO: the ac mask is ignored for now
2091*5113495bSYour Name 	 */
2092*5113495bSYour Name 	qdf_assert(ac_mask == 0xffff);
2093*5113495bSYour Name 	group_vdev_id_mask =
2094*5113495bSYour Name 		OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
2095*5113495bSYour Name 
2096*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2097*5113495bSYour Name 		group_vdev_bit_mask =
2098*5113495bSYour Name 			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
2099*5113495bSYour Name 					group_vdev_id_mask, vdev->vdev_id);
2100*5113495bSYour Name 		vdev_bit_mask =
2101*5113495bSYour Name 			OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(
2102*5113495bSYour Name 					vdev_id_mask, vdev->vdev_id);
2103*5113495bSYour Name 
2104*5113495bSYour Name 		if (group_vdev_bit_mask != vdev_bit_mask) {
2105*5113495bSYour Name 			/*
2106*5113495bSYour Name 			 * Change in vdev tx queue group
2107*5113495bSYour Name 			 */
2108*5113495bSYour Name 			if (!vdev_bit_mask) {
2109*5113495bSYour Name 				/* Set Group Pointer (vdev and peer) to NULL */
2110*5113495bSYour Name 				ol_txrx_info("Group membership removed for vdev_id %d from group_id %d",
2111*5113495bSYour Name 					     vdev->vdev_id, group_id);
2112*5113495bSYour Name 				ol_tx_set_vdev_group_ptr(
2113*5113495bSYour Name 						pdev, vdev->vdev_id, NULL);
2114*5113495bSYour Name 			} else {
2115*5113495bSYour Name 				/* Set Group Pointer (vdev and peer) */
2116*5113495bSYour Name 				ol_txrx_info("Group membership updated for vdev_id %d to group_id %d",
2117*5113495bSYour Name 					     vdev->vdev_id, group_id);
2118*5113495bSYour Name 				ol_tx_set_vdev_group_ptr(
2119*5113495bSYour Name 						pdev, vdev->vdev_id, group);
2120*5113495bSYour Name 			}
2121*5113495bSYour Name 		}
2122*5113495bSYour Name 	}
2123*5113495bSYour Name 	/* Update membership */
2124*5113495bSYour Name 	group->membership = membership;
2125*5113495bSYour Name 	ol_txrx_info("Group membership updated for group_id %d membership 0x%x",
2126*5113495bSYour Name 		     group_id, group->membership);
2127*5113495bSYour Name credit_update:
2128*5113495bSYour Name 	/* Update Credit */
2129*5113495bSYour Name 	ol_txrx_update_group_credit(group, credit, absolute);
2130*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
2131*5113495bSYour Name }
2132*5113495bSYour Name #endif
2133*5113495bSYour Name 
2134*5113495bSYour Name #if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
2135*5113495bSYour Name 	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
2136*5113495bSYour Name #define MIN_INIT_GROUP_CREDITS	10
2137*5113495bSYour Name int ol_txrx_distribute_group_credits(struct ol_txrx_pdev_t *pdev,
2138*5113495bSYour Name 				     u8 group_id,
2139*5113495bSYour Name 				     u32 vdevid_mask_new)
2140*5113495bSYour Name {
2141*5113495bSYour Name 	struct ol_tx_queue_group_t *grp = &pdev->txq_grps[group_id];
2142*5113495bSYour Name 	struct ol_tx_queue_group_t *grp_nxt = &pdev->txq_grps[!group_id];
2143*5113495bSYour Name 	int creds_nxt = qdf_atomic_read(&grp_nxt->credit);
2144*5113495bSYour Name 	int vdevid_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp->membership);
2145*5113495bSYour Name 	int vdevid_mask_othgrp =
2146*5113495bSYour Name 		OL_TXQ_GROUP_VDEV_ID_MASK_GET(grp_nxt->membership);
2147*5113495bSYour Name 	int creds_distribute = 0;
2148*5113495bSYour Name 
	/* the vdev being added is the first member of this group */
2150*5113495bSYour Name 	if ((vdevid_mask == 0) && (vdevid_mask_new != 0)) {
2151*5113495bSYour Name 		/* if other group has members */
2152*5113495bSYour Name 		if (vdevid_mask_othgrp) {
2153*5113495bSYour Name 			if (creds_nxt < MIN_INIT_GROUP_CREDITS)
2154*5113495bSYour Name 				creds_distribute = creds_nxt / 2;
2155*5113495bSYour Name 			else
2156*5113495bSYour Name 				creds_distribute = MIN_INIT_GROUP_CREDITS;
2157*5113495bSYour Name 
2158*5113495bSYour Name 			ol_txrx_update_group_credit(grp_nxt, -creds_distribute,
2159*5113495bSYour Name 						    0);
2160*5113495bSYour Name 		} else {
2161*5113495bSYour Name 			/*
2162*5113495bSYour Name 			 * Other grp has no members, give all credits to this
2163*5113495bSYour Name 			 * grp.
2164*5113495bSYour Name 			 */
2165*5113495bSYour Name 			creds_distribute =
2166*5113495bSYour Name 				qdf_atomic_read(&pdev->target_tx_credit);
2167*5113495bSYour Name 		}
2168*5113495bSYour Name 	/* if all vdevs are removed from this grp */
2169*5113495bSYour Name 	} else if ((vdevid_mask != 0) && (vdevid_mask_new == 0)) {
2170*5113495bSYour Name 		if (vdevid_mask_othgrp)
2171*5113495bSYour Name 			/* Transfer credits to other grp */
			ol_txrx_update_group_credit(
					grp_nxt,
					qdf_atomic_read(&grp->credit), 0);
2176*5113495bSYour Name 		/* Set current grp credits to zero */
2177*5113495bSYour Name 		ol_txrx_update_group_credit(grp, 0, 1);
2178*5113495bSYour Name 	}
2179*5113495bSYour Name 
2180*5113495bSYour Name 	return creds_distribute;
2181*5113495bSYour Name }
#endif /*
	* FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL &&
	* FEATURE_HL_DBS_GROUP_CREDIT_SHARING
	*/

#ifdef QCA_HL_NETDEV_FLOW_CONTROL
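/**
 * ol_txrx_register_hl_flow_control() - register the OS netif pause/unpause
 *	callback for high-latency flow control
 * @soc_hdl: soc handle
 * @pdev_id: datapath pdev identifier
 * @flowcontrol: callback invoked to pause/unpause the OS netif queues
 *
 * Return: 0 on success, QDF_STATUS_E_INVAL if the pdev or the callback
 *	   is NULL
 */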
int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id,
				     tx_pause_callback flowcontrol)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	u32 desc_pool_size;

	if (!pdev || !flowcontrol) {
		ol_txrx_err("pdev or pause_cb is NULL");
		return QDF_STATUS_E_INVAL;
	}

	desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
	/*
	 * Assert that the tx descriptor pool size meets the requirement:
	 * at most two sessions are allowed on a band.
	 */
	QDF_ASSERT((2 * ol_txrx_tx_desc_alloc_table[TXRX_FC_5GH_80M_2x2] +
		    ol_txrx_tx_desc_alloc_table[TXRX_FC_2GH_40M_2x2])
		    <= desc_pool_size);

	pdev->pause_cb = flowcontrol;
	return 0;
}

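/**
 * ol_txrx_set_vdev_os_queue_status() - update the vdev's OS netif queue
 *	pause state
 * @soc_hdl: soc handle (unused)
 * @vdev_id: vdev id for the given vdev
 * @action: WLAN_NETIF_PRIORITY_QUEUE_ON to unpause the priority queue,
 *	WLAN_WAKE_NON_PRIORITY_QUEUE to unpause the non-priority queues
 *
 * Return: 0 on success, -EINVAL on an unknown vdev or action
 */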
int ol_txrx_set_vdev_os_queue_status(struct cdp_soc_t *soc_hdl, u8 vdev_id,
				     enum netif_action_type action)
{
	struct ol_txrx_vdev_t *vdev =
	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid vdev_id %d", __func__, vdev_id);
		return -EINVAL;
	}

	switch (action) {
	case WLAN_NETIF_PRIORITY_QUEUE_ON:
		qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
		vdev->prio_q_paused = 0;
		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
		break;
	case WLAN_WAKE_NON_PRIORITY_QUEUE:
		qdf_atomic_set(&vdev->os_q_paused, 0);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid action %d", __func__, action);
		return -EINVAL;
	}
	return 0;
}

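/**
 * ol_txrx_set_vdev_tx_desc_limit() - set the vdev's tx descriptor limit
 *	and queue stop/restart thresholds based on its operating channel
 * @soc_hdl: soc handle (unused)
 * @vdev_id: vdev id for the given vdev
 * @chan_freq: the vdev's operating channel frequency (MHz)
 *
 * Return: 0 on success, -EINVAL on an unknown vdev
 */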
int ol_txrx_set_vdev_tx_desc_limit(struct cdp_soc_t *soc_hdl, u8 vdev_id,
				   u32 chan_freq)
{
	struct ol_txrx_vdev_t *vdev =
	(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
	enum ol_txrx_fc_limit_id fc_limit_id;
	u32 td_limit;

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid vdev_id %d", __func__, vdev_id);
		return -EINVAL;
	}

	/* TODO: Handle the number of spatial streams and the channel BW */
	if (WLAN_REG_IS_5GHZ_CH_FREQ(chan_freq))
		fc_limit_id = TXRX_FC_5GH_80M_2x2;
	else
		fc_limit_id = TXRX_FC_2GH_40M_2x2;

	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
	td_limit = ol_txrx_tx_desc_alloc_table[fc_limit_id];
	vdev->tx_desc_limit = td_limit;
	vdev->queue_stop_th = td_limit - TXRX_HL_TX_DESC_HI_PRIO_RESERVED;
	vdev->queue_restart_th = td_limit - TXRX_HL_TX_DESC_QUEUE_RESTART_TH;
	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);

	return 0;
}

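/**
 * ol_tx_dump_flow_pool_info_compact() - log flow control state for every
 *	vdev and tx queue group in a compact, single-line format
 * @pdev: the data physical device
 *
 * Per vdev the FC line prints
 * "vdev_id (limit,used)(os_q_paused,prio_q_paused)(stop_th,restart_th)";
 * the CREDIT line prints the target's total tx credit followed by
 * "group_id, (vdev id mask, credit)" for each group.
 */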
void ol_tx_dump_flow_pool_info_compact(struct ol_txrx_pdev_t *pdev)
{
	char *comb_log_str;
	int bytes_written = 0;
	int len;
	uint32_t free_size;
	struct ol_txrx_vdev_t *vdev;
	int i = 0;

	free_size = WLAN_MAX_VDEVS * 100;
	comb_log_str = qdf_mem_malloc(free_size);
	if (!comb_log_str)
		return;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		len = snprintf(&comb_log_str[bytes_written],
			       free_size, "%d (%d,%d)(%d,%d)(%d,%d) |",
			       vdev->vdev_id, vdev->tx_desc_limit,
			       qdf_atomic_read(&vdev->tx_desc_count),
			       qdf_atomic_read(&vdev->os_q_paused),
			       vdev->prio_q_paused, vdev->queue_stop_th,
			       vdev->queue_restart_th);
		/* Account only for this iteration's output so that
		 * free_size tracks the space actually remaining.
		 */
		bytes_written += len;
		free_size -= len;
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	qdf_nofl_debug("STATS | FC: %s", comb_log_str);

	free_size = WLAN_MAX_VDEVS * 100;
	bytes_written = 0;
	qdf_mem_zero(comb_log_str, free_size);

	len = snprintf(&comb_log_str[bytes_written], free_size, "%d ",
		       qdf_atomic_read(&pdev->target_tx_credit));
	bytes_written += len;
	free_size -= len;
	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
		len = snprintf(&comb_log_str[bytes_written],
			       free_size, "|%d, (0x%x, %d)", i,
			       OL_TXQ_GROUP_VDEV_ID_MASK_GET(
			       pdev->txq_grps[i].membership),
			       qdf_atomic_read(
			       &pdev->txq_grps[i].credit));
		bytes_written += len;
		free_size -= len;
	}
	qdf_nofl_debug("STATS | CREDIT: %s", comb_log_str);
	qdf_mem_free(comb_log_str);
}

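/**
 * ol_tx_dump_flow_pool_info() - dump flow control state for each vdev in
 *	a verbose, multi-line format
 * @soc_hdl: soc handle
 */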
void ol_tx_dump_flow_pool_info(struct cdp_soc_t *soc_hdl)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev;
	struct ol_txrx_vdev_t *vdev;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	qdf_spin_lock_bh(&pdev->tx_mutex);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		txrx_nofl_info("vdev_id %d", vdev->vdev_id);
		txrx_nofl_info("limit %d available %d stop_threshold %d restart_threshold %d",
			       vdev->tx_desc_limit,
			       qdf_atomic_read(&vdev->tx_desc_count),
			       vdev->queue_stop_th, vdev->queue_restart_th);
		txrx_nofl_info("q_paused %d prio_q_paused %d",
			       qdf_atomic_read(&vdev->os_q_paused),
			       vdev->prio_q_paused);
		txrx_nofl_info("no_of_bundle_sent_after_threshold %lld",
			       vdev->no_of_bundle_sent_after_threshold);
		txrx_nofl_info("no_of_bundle_sent_in_timer %lld",
			       vdev->no_of_bundle_sent_in_timer);
		txrx_nofl_info("no_of_pkt_not_added_in_queue %lld",
			       vdev->no_of_pkt_not_added_in_queue);
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */