xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_tx_send.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
21*5113495bSYour Name #include <qdf_lock.h>           /* qdf_os_spinlock */
22*5113495bSYour Name #include <qdf_time.h>           /* qdf_system_ticks, etc. */
23*5113495bSYour Name #include <qdf_nbuf.h>           /* qdf_nbuf_t */
24*5113495bSYour Name #include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */
25*5113495bSYour Name 
26*5113495bSYour Name #include "queue.h"          /* TAILQ */
27*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY
28*5113495bSYour Name #include <enet.h>               /* ethernet_hdr_t, etc. */
29*5113495bSYour Name #include <ipv6_defs.h>          /* ipv6_traffic_class */
30*5113495bSYour Name #endif
31*5113495bSYour Name 
32*5113495bSYour Name #include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
33*5113495bSYour Name #include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
34*5113495bSYour Name #include <ol_txrx_htt_api.h>    /* htt_tx_status */
35*5113495bSYour Name 
36*5113495bSYour Name #include <ol_ctrl_txrx_api.h>
37*5113495bSYour Name #include <cdp_txrx_tx_delay.h>
38*5113495bSYour Name #include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
39*5113495bSYour Name #include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
40*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY
41*5113495bSYour Name #include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
42*5113495bSYour Name #endif
43*5113495bSYour Name #include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
44*5113495bSYour Name #include <ol_osif_txrx_api.h>
45*5113495bSYour Name #include <ol_tx.h>              /* ol_tx_reinject */
46*5113495bSYour Name #include <ol_tx_send.h>
47*5113495bSYour Name 
48*5113495bSYour Name #include <ol_cfg.h>             /* ol_cfg_is_high_latency */
49*5113495bSYour Name #include <ol_tx_sched.h>
50*5113495bSYour Name #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
51*5113495bSYour Name #include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
52*5113495bSYour Name #endif
53*5113495bSYour Name #include <ol_tx_queue.h>
54*5113495bSYour Name #include <ol_txrx.h>
55*5113495bSYour Name #include <pktlog_ac_fmt.h>
56*5113495bSYour Name #include <cdp_txrx_handle.h>
57*5113495bSYour Name #include <wlan_pkt_capture_ucfg_api.h>
58*5113495bSYour Name #include <wlan_dp_txrx.h>
59*5113495bSYour Name #ifdef TX_CREDIT_RECLAIM_SUPPORT
60*5113495bSYour Name 
61*5113495bSYour Name #define OL_TX_CREDIT_RECLAIM(pdev)					\
62*5113495bSYour Name 	do {								\
63*5113495bSYour Name 		if (qdf_atomic_read(&pdev->target_tx_credit)  <		\
64*5113495bSYour Name 		    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) {		\
65*5113495bSYour Name 			ol_osif_ath_tasklet(pdev->osdev);		\
66*5113495bSYour Name 		}							\
67*5113495bSYour Name 	} while (0)
68*5113495bSYour Name 
69*5113495bSYour Name #else
70*5113495bSYour Name 
71*5113495bSYour Name #define OL_TX_CREDIT_RECLAIM(pdev)
72*5113495bSYour Name 
73*5113495bSYour Name #endif /* TX_CREDIT_RECLAIM_SUPPORT */
74*5113495bSYour Name 
75*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)
76*5113495bSYour Name 
77*5113495bSYour Name /*
78*5113495bSYour Name  * HL needs to keep track of the amount of credit available to download
79*5113495bSYour Name  * tx frames to the target - the download scheduler decides when to
80*5113495bSYour Name  * download frames, and which frames to download, based on the credit
81*5113495bSYour Name  * availability.
82*5113495bSYour Name  * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
83*5113495bSYour Name  * of the target_tx_credit, to determine when to poll for tx completion
84*5113495bSYour Name  * messages.
85*5113495bSYour Name  */
86*5113495bSYour Name static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t * pdev,int delta)87*5113495bSYour Name ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
88*5113495bSYour Name {
89*5113495bSYour Name 	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
90*5113495bSYour Name }
91*5113495bSYour Name 
92*5113495bSYour Name static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t * pdev,int delta)93*5113495bSYour Name ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
94*5113495bSYour Name {
95*5113495bSYour Name 	qdf_atomic_add(delta, &pdev->target_tx_credit);
96*5113495bSYour Name }
97*5113495bSYour Name #else
98*5113495bSYour Name 
/* No-op: credit tracking is compiled out in this configuration. */
static inline void
ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}
103*5113495bSYour Name 
/* No-op: credit tracking is compiled out in this configuration. */
static inline void
ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
{
}
108*5113495bSYour Name #endif
109*5113495bSYour Name 
#ifdef DESC_TIMESTAMP_DEBUG_INFO
/* Record the current log timestamp as the descriptor's last completion
 * time, for descriptor lifetime debugging.
 */
static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->desc_debug_info.last_comp_ts = qdf_get_log_timestamp();
}
#else
/* Stub when descriptor timestamp debugging is compiled out. */
static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc)
{
}
#endif
120*5113495bSYour Name 
#if defined(CONFIG_HL_SUPPORT) && defined(QCA_HL_NETDEV_FLOW_CONTROL)
/*
 * ol_tx_flow_ct_unpause_os_q() - resume any netif queues that were paused
 * by per-vdev tx descriptor flow control, now that descriptors have been
 * returned.
 */
void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
{
	struct ol_txrx_vdev_t *vdev;
	bool resume_hl_queues = false;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		/* vdevs with no descriptor limit are not flow controlled */
		if (!vdev->tx_desc_limit)
			continue;

		/* resume the high priority queue once usage drops below
		 * the hard limit
		 */
		if (vdev->prio_q_paused &&
		    qdf_atomic_read(&vdev->tx_desc_count) <
		    vdev->tx_desc_limit) {
			pdev->pause_cb(vdev->vdev_id,
				       WLAN_NETIF_PRIORITY_QUEUE_ON,
				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
			vdev->prio_q_paused = 0;
		}

		/* resume the normal queues at the restart threshold */
		if (qdf_atomic_read(&vdev->os_q_paused) &&
		    qdf_atomic_read(&vdev->tx_desc_count) <=
		    vdev->queue_restart_th) {
			pdev->pause_cb(vdev->vdev_id,
				       WLAN_WAKE_NON_PRIORITY_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			qdf_atomic_set(&vdev->os_q_paused, 0);
			resume_hl_queues = true;
		}
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);

	/* kick the HL scheduler outside the lock if anything was resumed */
	if (resume_hl_queues)
		ol_tx_hl_pdev_queue_send_all(pdev);
}
#endif
157*5113495bSYour Name 
158*5113495bSYour Name static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t * pdev,struct ol_tx_desc_t * tx_desc,qdf_nbuf_t msdu)159*5113495bSYour Name ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
160*5113495bSYour Name 		struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
161*5113495bSYour Name {
162*5113495bSYour Name 	int msdu_credit_consumed;
163*5113495bSYour Name 
164*5113495bSYour Name 	TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
165*5113495bSYour Name 	TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
166*5113495bSYour Name 			      qdf_atomic_read(&pdev->target_tx_credit),
167*5113495bSYour Name 			      qdf_atomic_read(&pdev->target_tx_credit) - 1,
168*5113495bSYour Name 			      qdf_nbuf_len(msdu));
169*5113495bSYour Name 
170*5113495bSYour Name 	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
171*5113495bSYour Name 	ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
172*5113495bSYour Name 	OL_TX_CREDIT_RECLAIM(pdev);
173*5113495bSYour Name 
174*5113495bSYour Name 	/*
175*5113495bSYour Name 	 * When the tx frame is downloaded to the target, there are two
176*5113495bSYour Name 	 * outstanding references:
177*5113495bSYour Name 	 * 1.  The host download SW (HTT, HTC, HIF)
178*5113495bSYour Name 	 *     This reference is cleared by the ol_tx_send_done callback
179*5113495bSYour Name 	 *     functions.
180*5113495bSYour Name 	 * 2.  The target FW
181*5113495bSYour Name 	 *     This reference is cleared by the ol_tx_completion_handler
182*5113495bSYour Name 	 *     function.
183*5113495bSYour Name 	 * It is extremely probable that the download completion is processed
184*5113495bSYour Name 	 * before the tx completion message.  However, under exceptional
185*5113495bSYour Name 	 * conditions the tx completion may be processed first.  Thus, rather
186*5113495bSYour Name 	 * that assuming that reference (1) is done before reference (2),
187*5113495bSYour Name 	 * explicit reference tracking is needed.
188*5113495bSYour Name 	 * Double-increment the ref count to account for both references
189*5113495bSYour Name 	 * described above.
190*5113495bSYour Name 	 */
191*5113495bSYour Name 
192*5113495bSYour Name 	OL_TX_DESC_REF_INIT(tx_desc);
193*5113495bSYour Name 	OL_TX_DESC_REF_INC(tx_desc);
194*5113495bSYour Name 	OL_TX_DESC_REF_INC(tx_desc);
195*5113495bSYour Name 
196*5113495bSYour Name 	return msdu_credit_consumed;
197*5113495bSYour Name }
198*5113495bSYour Name 
199*5113495bSYour Name void
ol_tx_send(struct ol_txrx_pdev_t * pdev,struct ol_tx_desc_t * tx_desc,qdf_nbuf_t msdu,uint8_t vdev_id)200*5113495bSYour Name ol_tx_send(struct ol_txrx_pdev_t *pdev,
201*5113495bSYour Name 	   struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu, uint8_t vdev_id)
202*5113495bSYour Name {
203*5113495bSYour Name 	int msdu_credit_consumed;
204*5113495bSYour Name 	uint16_t id;
205*5113495bSYour Name 	int failed;
206*5113495bSYour Name 
207*5113495bSYour Name 	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
208*5113495bSYour Name 	id = ol_tx_desc_id(pdev, tx_desc);
209*5113495bSYour Name 	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
210*5113495bSYour Name 	DPTRACE(qdf_dp_trace_ptr(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
211*5113495bSYour Name 				QDF_TRACE_DEFAULT_PDEV_ID,
212*5113495bSYour Name 				qdf_nbuf_data_addr(msdu),
213*5113495bSYour Name 				sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
214*5113495bSYour Name 				vdev_id, 0,
215*5113495bSYour Name 				tx_desc->vdev->qdf_opmode
216*5113495bSYour Name 				));
217*5113495bSYour Name 	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
218*5113495bSYour Name 	if (qdf_unlikely(failed)) {
219*5113495bSYour Name 		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
220*5113495bSYour Name 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
221*5113495bSYour Name 	}
222*5113495bSYour Name }
223*5113495bSYour Name 
224*5113495bSYour Name void
ol_tx_send_batch(struct ol_txrx_pdev_t * pdev,qdf_nbuf_t head_msdu,int num_msdus)225*5113495bSYour Name ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
226*5113495bSYour Name 		 qdf_nbuf_t head_msdu, int num_msdus)
227*5113495bSYour Name {
228*5113495bSYour Name 	qdf_nbuf_t rejected;
229*5113495bSYour Name 
230*5113495bSYour Name 	OL_TX_CREDIT_RECLAIM(pdev);
231*5113495bSYour Name 
232*5113495bSYour Name 	rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
233*5113495bSYour Name 	while (qdf_unlikely(rejected)) {
234*5113495bSYour Name 		struct ol_tx_desc_t *tx_desc;
235*5113495bSYour Name 		uint16_t *msdu_id_storage;
236*5113495bSYour Name 		qdf_nbuf_t next;
237*5113495bSYour Name 
238*5113495bSYour Name 		next = qdf_nbuf_next(rejected);
239*5113495bSYour Name 		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
240*5113495bSYour Name 		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);
241*5113495bSYour Name 
242*5113495bSYour Name 		ol_tx_target_credit_incr(pdev, rejected);
243*5113495bSYour Name 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
244*5113495bSYour Name 
245*5113495bSYour Name 		rejected = next;
246*5113495bSYour Name 	}
247*5113495bSYour Name }
248*5113495bSYour Name 
249*5113495bSYour Name void
ol_tx_send_nonstd(struct ol_txrx_pdev_t * pdev,struct ol_tx_desc_t * tx_desc,qdf_nbuf_t msdu,enum htt_pkt_type pkt_type)250*5113495bSYour Name ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
251*5113495bSYour Name 		  struct ol_tx_desc_t *tx_desc,
252*5113495bSYour Name 		  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
253*5113495bSYour Name {
254*5113495bSYour Name 	int msdu_credit_consumed;
255*5113495bSYour Name 	uint16_t id;
256*5113495bSYour Name 	int failed;
257*5113495bSYour Name 
258*5113495bSYour Name 	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
259*5113495bSYour Name 	id = ol_tx_desc_id(pdev, tx_desc);
260*5113495bSYour Name 	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
261*5113495bSYour Name 	failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
262*5113495bSYour Name 	if (failed) {
263*5113495bSYour Name 		ol_txrx_err(
264*5113495bSYour Name 			   "Error: freeing tx frame after htt_tx failed");
265*5113495bSYour Name 		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
266*5113495bSYour Name 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
267*5113495bSYour Name 	}
268*5113495bSYour Name }
269*5113495bSYour Name 
270*5113495bSYour Name static inline bool
ol_tx_download_done_base(struct ol_txrx_pdev_t * pdev,A_STATUS status,qdf_nbuf_t msdu,uint16_t msdu_id)271*5113495bSYour Name ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
272*5113495bSYour Name 			 A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
273*5113495bSYour Name {
274*5113495bSYour Name 	struct ol_tx_desc_t *tx_desc;
275*5113495bSYour Name 	bool is_frame_freed = false;
276*5113495bSYour Name 
277*5113495bSYour Name 	tx_desc = ol_tx_desc_find(pdev, msdu_id);
278*5113495bSYour Name 	qdf_assert(tx_desc);
279*5113495bSYour Name 
280*5113495bSYour Name 	/*
281*5113495bSYour Name 	 * If the download is done for
282*5113495bSYour Name 	 * the Management frame then
283*5113495bSYour Name 	 * call the download callback if registered
284*5113495bSYour Name 	 */
285*5113495bSYour Name 	if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
286*5113495bSYour Name 		ol_txrx_mgmt_tx_cb download_cb =
287*5113495bSYour Name 			pdev->tx_mgmt_cb.download_cb;
288*5113495bSYour Name 		if (download_cb) {
289*5113495bSYour Name 			download_cb(pdev->tx_mgmt_cb.ctxt,
290*5113495bSYour Name 				    tx_desc->netbuf, status != A_OK);
291*5113495bSYour Name 		}
292*5113495bSYour Name 	}
293*5113495bSYour Name 
294*5113495bSYour Name 	if (status != A_OK) {
295*5113495bSYour Name 		ol_tx_target_credit_incr(pdev, msdu);
296*5113495bSYour Name 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
297*5113495bSYour Name 					     1 /* download err */);
298*5113495bSYour Name 		is_frame_freed = true;
299*5113495bSYour Name 	} else {
300*5113495bSYour Name 		if (OL_TX_DESC_NO_REFS(tx_desc)) {
301*5113495bSYour Name 			/*
302*5113495bSYour Name 			 * The decremented value was zero - free the frame.
303*5113495bSYour Name 			 * Use the tx status recorded previously during
304*5113495bSYour Name 			 * tx completion handling.
305*5113495bSYour Name 			 */
306*5113495bSYour Name 			ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
307*5113495bSYour Name 						     tx_desc->status !=
308*5113495bSYour Name 						     htt_tx_status_ok);
309*5113495bSYour Name 			is_frame_freed = true;
310*5113495bSYour Name 		}
311*5113495bSYour Name 	}
312*5113495bSYour Name 	return is_frame_freed;
313*5113495bSYour Name }
314*5113495bSYour Name 
315*5113495bSYour Name void
ol_tx_download_done_ll(void * pdev,A_STATUS status,qdf_nbuf_t msdu,uint16_t msdu_id)316*5113495bSYour Name ol_tx_download_done_ll(void *pdev,
317*5113495bSYour Name 		       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
318*5113495bSYour Name {
319*5113495bSYour Name 	ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
320*5113495bSYour Name 				 msdu_id);
321*5113495bSYour Name }
322*5113495bSYour Name 
323*5113495bSYour Name void
ol_tx_download_done_hl_retain(void * txrx_pdev,A_STATUS status,qdf_nbuf_t msdu,uint16_t msdu_id)324*5113495bSYour Name ol_tx_download_done_hl_retain(void *txrx_pdev,
325*5113495bSYour Name 			      A_STATUS status,
326*5113495bSYour Name 			      qdf_nbuf_t msdu, uint16_t msdu_id)
327*5113495bSYour Name {
328*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = txrx_pdev;
329*5113495bSYour Name 
330*5113495bSYour Name 	ol_tx_download_done_base(pdev, status, msdu, msdu_id);
331*5113495bSYour Name }
332*5113495bSYour Name 
333*5113495bSYour Name void
ol_tx_download_done_hl_free(void * txrx_pdev,A_STATUS status,qdf_nbuf_t msdu,uint16_t msdu_id)334*5113495bSYour Name ol_tx_download_done_hl_free(void *txrx_pdev,
335*5113495bSYour Name 			    A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
336*5113495bSYour Name {
337*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = txrx_pdev;
338*5113495bSYour Name 	struct ol_tx_desc_t *tx_desc;
339*5113495bSYour Name 	bool is_frame_freed;
340*5113495bSYour Name 	uint8_t dp_status;
341*5113495bSYour Name 
342*5113495bSYour Name 	tx_desc = ol_tx_desc_find(pdev, msdu_id);
343*5113495bSYour Name 	qdf_assert(tx_desc);
344*5113495bSYour Name 	dp_status = qdf_dp_get_status_from_a_status(status);
345*5113495bSYour Name 	DPTRACE(qdf_dp_trace_ptr(msdu,
346*5113495bSYour Name 				 QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
347*5113495bSYour Name 				 QDF_TRACE_DEFAULT_PDEV_ID,
348*5113495bSYour Name 				 qdf_nbuf_data_addr(msdu),
349*5113495bSYour Name 				 sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
350*5113495bSYour Name 				 dp_status, 0,
351*5113495bSYour Name 				 tx_desc->vdev->qdf_opmode
352*5113495bSYour Name 				 ));
353*5113495bSYour Name 
354*5113495bSYour Name 	is_frame_freed = ol_tx_download_done_base(pdev, status, msdu, msdu_id);
355*5113495bSYour Name 
356*5113495bSYour Name 	/*
357*5113495bSYour Name 	 * if frame is freed in ol_tx_download_done_base then return.
358*5113495bSYour Name 	 */
359*5113495bSYour Name 	if (is_frame_freed) {
360*5113495bSYour Name 		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
361*5113495bSYour Name 		return;
362*5113495bSYour Name 	}
363*5113495bSYour Name 
364*5113495bSYour Name 	if ((tx_desc->pkt_type != OL_TX_FRM_NO_FREE) &&
365*5113495bSYour Name 	    (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
366*5113495bSYour Name 		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
367*5113495bSYour Name 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
368*5113495bSYour Name 	}
369*5113495bSYour Name }
370*5113495bSYour Name 
ol_tx_target_credit_init(struct ol_txrx_pdev_t * pdev,int credit_delta)371*5113495bSYour Name void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
372*5113495bSYour Name {
373*5113495bSYour Name 	qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
374*5113495bSYour Name }
375*5113495bSYour Name 
ol_tx_target_credit_update(struct ol_txrx_pdev_t * pdev,int credit_delta)376*5113495bSYour Name void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
377*5113495bSYour Name {
378*5113495bSYour Name 	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
379*5113495bSYour Name 			      qdf_atomic_read(&pdev->target_tx_credit),
380*5113495bSYour Name 			      credit_delta,
381*5113495bSYour Name 			      qdf_atomic_read(&pdev->target_tx_credit) +
382*5113495bSYour Name 			      credit_delta);
383*5113495bSYour Name 	qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
384*5113495bSYour Name }
385*5113495bSYour Name 
#ifdef QCA_COMPUTE_TX_DELAY

/* Forward declaration: tx delay statistics are computed per batch of
 * completed descriptor IDs (static, so the definition is elsewhere in
 * this file).
 */
static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus);

#else
/* Stub when tx delay computation is compiled out. */
static inline void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
}
#endif /* QCA_COMPUTE_TX_DELAY */
401*5113495bSYour Name 
#if defined(CONFIG_HL_SUPPORT)
/*
 * ol_tx_deduct_one_credit() - deduct one unit of target tx credit (and one
 * any-group credit) when no tx completion is requested from the target,
 * since no completion message will return the credit later.
 *
 * Returns 0.
 */
int ol_tx_deduct_one_credit(struct ol_txrx_pdev_t *pdev)
{
	/* TODO: Check if enough credits */

	if (pdev->cfg.default_tx_comp_req)
		return 0;

	ol_tx_target_credit_update(pdev, -1);
	ol_tx_deduct_one_any_group_credit(pdev);

	DPTRACE(qdf_dp_trace_credit_record(QDF_TX_HTT_MSG,
		QDF_CREDIT_DEC, 1,
		qdf_atomic_read(&pdev->target_tx_credit),
		qdf_atomic_read(&pdev->txq_grps[0].credit),
		qdf_atomic_read(&pdev->txq_grps[1].credit)));

	return 0;
}
#endif /* CONFIG_HL_SUPPORT */
421*5113495bSYour Name 
#ifndef OL_TX_RESTORE_HDR
/* Default no-op when SW tx encap support supplies no header-restore op. */
#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
#endif
/*
 * The following macros could have been inline functions too.
 * The only rationale for choosing macros, is to force the compiler to inline
 * the implementation, which cannot be controlled for actual "inline" functions,
 * since "inline" is only a hint to the compiler.
 * In the performance path, we choose to force the inlining, in preference to
 * type-checking offered by the actual inlined functions.
 */
/* Batch completion: append the descriptor to a local list for bulk
 * processing after the completion loop.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
	TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
#ifndef ATH_11AC_TXCOMPACT
/*
 * Single completion: unmap and free the network buffer, then push the tx
 * descriptor onto the caller's local free list (tracking the list tail in
 * _tx_desc_last when the list was empty).
 *
 * Fix: the empty-list test previously referenced the bare identifier
 * `lcl_freelist`, silently capturing a variable of that exact name from
 * the caller's scope instead of using the macro parameter.  It now uses
 * the `_lcl_freelist` parameter, so the macro is hygienic regardless of
 * the caller's variable naming.
 */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
				   _lcl_freelist, _tx_desc_last)	\
	do {								\
		qdf_atomic_init(&(_tx_desc)->ref_cnt);			\
		/* restore original hdr offset */			\
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf));				\
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
			(_lcl_freelist);				\
		if (qdf_unlikely(!(_lcl_freelist))) {			\
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc);				\
		}							\
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)
#else    /*!ATH_11AC_TXCOMPACT */
/* Compact variant: identical except the descriptor ref count is not
 * re-initialized here.
 */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
				   _lcl_freelist, _tx_desc_last)	\
	do {								\
		/* restore original hdr offset */			\
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
		qdf_nbuf_free((_netbuf));				\
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
			(_lcl_freelist);				\
		if (qdf_unlikely(!(_lcl_freelist))) {			\
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc);				\
		}							\
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)

#endif /*!ATH_11AC_TXCOMPACT */
470*5113495bSYour Name 
/*
 * ol_tx_msdu_complete() - dispatch a completed tx descriptor.
 * Four variants are selected at build time:
 *  - QCA_TX_SINGLE_COMPLETIONS: free each msdu individually
 *    (ol_tx_msdu_complete_single); otherwise queue for batch freeing
 *    (ol_tx_msdu_complete_batch).
 *  - QCA_TX_STD_PATH_ONLY: all descriptors are standard tx frames;
 *    otherwise non-standard descriptors (mgmt etc.) are diverted to
 *    ol_tx_desc_frame_free_nonstd and is_tx_desc_freed is set to 1.
 */
#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
/* Single completions, std-only path */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	{								\
		is_tx_desc_freed = 0;					\
		ol_tx_msdu_complete_single((_pdev), (_tx_desc),		\
					   (_netbuf), (_lcl_freelist),	\
					   _tx_desc_last)		\
	}
#else                           /* !QCA_TX_STD_PATH_ONLY */
/* Single completions, with non-standard frame handling */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	do {								\
		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
			is_tx_desc_freed = 0;				\
			ol_tx_msdu_complete_single((_pdev), (_tx_desc),\
						   (_netbuf), (_lcl_freelist), \
						   (_tx_desc_last));	\
		} else {						\
			is_tx_desc_freed = 1;				\
			ol_tx_desc_frame_free_nonstd(			\
				(_pdev), (_tx_desc),			\
				(_status) != htt_tx_status_ok);		\
		}							\
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#else                           /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
/* Batch completions, std-only path */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	{								\
		is_tx_desc_freed = 0;					\
		ol_tx_msdu_complete_batch((_pdev), (_tx_desc),		\
					(_tx_descs), (_status))		\
	}
#else                           /* !QCA_TX_STD_PATH_ONLY */
/* Batch completions, with non-standard frame handling */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status, is_tx_desc_freed)	\
	do {								\
		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
			is_tx_desc_freed = 0;				\
			ol_tx_msdu_complete_batch((_pdev), (_tx_desc),	\
						  (_tx_descs), (_status)); \
		} else {						\
			is_tx_desc_freed = 1;				\
			ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
						     (_status) !=	\
						     htt_tx_status_ok); \
		}							\
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */
528*5113495bSYour Name 
529*5113495bSYour Name #if !defined(CONFIG_HL_SUPPORT)
ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)530*5113495bSYour Name void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
531*5113495bSYour Name {
532*5113495bSYour Name 	int i = 0;
533*5113495bSYour Name 	struct ol_tx_desc_t *tx_desc;
534*5113495bSYour Name 	int num_disarded = 0;
535*5113495bSYour Name 
536*5113495bSYour Name 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
537*5113495bSYour Name 		tx_desc = ol_tx_desc_find(pdev, i);
538*5113495bSYour Name 		/*
539*5113495bSYour Name 		 * Confirm that each tx descriptor is "empty", i.e. it has
540*5113495bSYour Name 		 * no tx frame attached.
541*5113495bSYour Name 		 * In particular, check that there are no frames that have
542*5113495bSYour Name 		 * been given to the target to transmit, for which the
543*5113495bSYour Name 		 * target has never provided a response.
544*5113495bSYour Name 		 */
545*5113495bSYour Name 		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
546*5113495bSYour Name 			ol_txrx_dbg(
547*5113495bSYour Name 				   "Warning: freeing tx desc %d", tx_desc->id);
548*5113495bSYour Name 			ol_tx_desc_frame_free_nonstd(pdev,
549*5113495bSYour Name 						     tx_desc, 1);
550*5113495bSYour Name 			num_disarded++;
551*5113495bSYour Name 		}
552*5113495bSYour Name 	}
553*5113495bSYour Name 
554*5113495bSYour Name 	if (num_disarded)
555*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
556*5113495bSYour Name 			"Warning: freed %d tx descs for which no tx completion rcvd from the target",
557*5113495bSYour Name 			num_disarded);
558*5113495bSYour Name }
559*5113495bSYour Name #endif
560*5113495bSYour Name 
/*
 * ol_tx_credit_completion_handler() - handle a credit-report message from
 * the target: add the returned credit, let the HL scheduler use it, and
 * resume any flow-controlled OS queues.
 */
void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
	ol_tx_target_credit_update(pdev, credits);

	/* fresh credit may allow queued HL frames to be downloaded */
	if (pdev->cfg.is_high_latency)
		ol_tx_sched(pdev);

	/* UNPAUSE OS Q */
	ol_tx_flow_ct_unpause_os_q(pdev);
}
571*5113495bSYour Name 
572*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
573*5113495bSYour Name /**
574*5113495bSYour Name  * ol_tx_flow_pool_lock() - take flow pool lock
575*5113495bSYour Name  * @tx_desc: tx desc
576*5113495bSYour Name  *
577*5113495bSYour Name  * Return: None
578*5113495bSYour Name  */
579*5113495bSYour Name static inline
ol_tx_flow_pool_lock(struct ol_tx_desc_t * tx_desc)580*5113495bSYour Name void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc)
581*5113495bSYour Name {
582*5113495bSYour Name 	struct ol_tx_flow_pool_t *pool;
583*5113495bSYour Name 
584*5113495bSYour Name 	pool = tx_desc->pool;
585*5113495bSYour Name 	qdf_spin_lock_bh(&pool->flow_pool_lock);
586*5113495bSYour Name }
587*5113495bSYour Name 
588*5113495bSYour Name /**
589*5113495bSYour Name  * ol_tx_flow_pool_unlock() - release flow pool lock
590*5113495bSYour Name  * @tx_desc: tx desc
591*5113495bSYour Name  *
592*5113495bSYour Name  * Return: None
593*5113495bSYour Name  */
594*5113495bSYour Name static inline
ol_tx_flow_pool_unlock(struct ol_tx_desc_t * tx_desc)595*5113495bSYour Name void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc)
596*5113495bSYour Name {
597*5113495bSYour Name 	struct ol_tx_flow_pool_t *pool;
598*5113495bSYour Name 
599*5113495bSYour Name 	pool = tx_desc->pool;
600*5113495bSYour Name 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
601*5113495bSYour Name }
602*5113495bSYour Name #else
603*5113495bSYour Name static inline
ol_tx_flow_pool_lock(struct ol_tx_desc_t * tx_desc)604*5113495bSYour Name void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc)
605*5113495bSYour Name {
606*5113495bSYour Name }
607*5113495bSYour Name 
608*5113495bSYour Name static inline
ol_tx_flow_pool_unlock(struct ol_tx_desc_t * tx_desc)609*5113495bSYour Name void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc)
610*5113495bSYour Name {
611*5113495bSYour Name }
612*5113495bSYour Name #endif
613*5113495bSYour Name 
614*5113495bSYour Name #ifdef WLAN_FEATURE_PKT_CAPTURE
615*5113495bSYour Name #define RESERVE_BYTES 100
616*5113495bSYour Name /**
617*5113495bSYour Name  * ol_tx_pkt_capture_tx_completion_process(): process tx packets
618*5113495bSYour Name  * for pkt capture mode
619*5113495bSYour Name  * @pdev: device handler
620*5113495bSYour Name  * @tx_desc: tx desc
621*5113495bSYour Name  * @payload: tx data header
622*5113495bSYour Name  * @tid:  tid number
623*5113495bSYour Name  * @status: Tx status
624*5113495bSYour Name  *
625*5113495bSYour Name  * Return: none
626*5113495bSYour Name  */
627*5113495bSYour Name static void
ol_tx_pkt_capture_tx_completion_process(ol_txrx_pdev_handle pdev,struct ol_tx_desc_t * tx_desc,struct htt_tx_data_hdr_information * payload_hdr,uint8_t tid,uint8_t status)628*5113495bSYour Name ol_tx_pkt_capture_tx_completion_process(
629*5113495bSYour Name 			ol_txrx_pdev_handle pdev,
630*5113495bSYour Name 			struct ol_tx_desc_t *tx_desc,
631*5113495bSYour Name 			struct htt_tx_data_hdr_information *payload_hdr,
632*5113495bSYour Name 			uint8_t tid, uint8_t status)
633*5113495bSYour Name {
634*5113495bSYour Name 	qdf_nbuf_t netbuf;
635*5113495bSYour Name 	int nbuf_len;
636*5113495bSYour Name 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
637*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
638*5113495bSYour Name 	uint8_t bssid[QDF_MAC_ADDR_SIZE];
639*5113495bSYour Name 	uint8_t pkt_type = 0;
640*5113495bSYour Name 
641*5113495bSYour Name 	qdf_assert(tx_desc);
642*5113495bSYour Name 
643*5113495bSYour Name 	ol_tx_flow_pool_lock(tx_desc);
644*5113495bSYour Name 	/*
645*5113495bSYour Name 	 * In cases when vdev has gone down and tx completion
646*5113495bSYour Name 	 * are received, leads to NULL vdev access.
647*5113495bSYour Name 	 * So, check for NULL before dereferencing it.
648*5113495bSYour Name 	 */
649*5113495bSYour Name 	if (!tx_desc->vdev) {
650*5113495bSYour Name 		ol_tx_flow_pool_unlock(tx_desc);
651*5113495bSYour Name 		return;
652*5113495bSYour Name 	}
653*5113495bSYour Name 
654*5113495bSYour Name 	ol_tx_flow_pool_unlock(tx_desc);
655*5113495bSYour Name 
656*5113495bSYour Name 	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
657*5113495bSYour Name 		if (!tx_desc->tso_desc)
658*5113495bSYour Name 			return;
659*5113495bSYour Name 
660*5113495bSYour Name 		tso_seg = tx_desc->tso_desc;
661*5113495bSYour Name 		nbuf_len = tso_seg->seg.total_len;
662*5113495bSYour Name 	} else {
663*5113495bSYour Name 		int i, extra_frag_len = 0;
664*5113495bSYour Name 
665*5113495bSYour Name 		i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(tx_desc->netbuf);
666*5113495bSYour Name 		if (i > 0)
667*5113495bSYour Name 			extra_frag_len =
668*5113495bSYour Name 			QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(tx_desc->netbuf);
669*5113495bSYour Name 		nbuf_len = qdf_nbuf_len(tx_desc->netbuf) - extra_frag_len;
670*5113495bSYour Name 	}
671*5113495bSYour Name 
672*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
673*5113495bSYour Name 	peer = TAILQ_FIRST(&tx_desc->vdev->peer_list);
674*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
675*5113495bSYour Name 	if (!peer)
676*5113495bSYour Name 		return;
677*5113495bSYour Name 
678*5113495bSYour Name 	qdf_spin_lock_bh(&peer->peer_info_lock);
679*5113495bSYour Name 	qdf_mem_copy(bssid, &peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
680*5113495bSYour Name 	qdf_spin_unlock_bh(&peer->peer_info_lock);
681*5113495bSYour Name 
682*5113495bSYour Name 	netbuf = qdf_nbuf_alloc(NULL,
683*5113495bSYour Name 				roundup(nbuf_len + RESERVE_BYTES, 4),
684*5113495bSYour Name 				RESERVE_BYTES, 4, false);
685*5113495bSYour Name 	if (!netbuf)
686*5113495bSYour Name 		return;
687*5113495bSYour Name 
688*5113495bSYour Name 	qdf_nbuf_put_tail(netbuf, nbuf_len);
689*5113495bSYour Name 
690*5113495bSYour Name 	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
691*5113495bSYour Name 		uint8_t frag_cnt, num_frags = 0;
692*5113495bSYour Name 		int frag_len = 0;
693*5113495bSYour Name 		uint32_t tcp_seq_num;
694*5113495bSYour Name 		uint16_t ip_len;
695*5113495bSYour Name 
696*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
697*5113495bSYour Name 
698*5113495bSYour Name 		if (tso_seg->seg.num_frags > 0)
699*5113495bSYour Name 			num_frags = tso_seg->seg.num_frags - 1;
700*5113495bSYour Name 
701*5113495bSYour Name 		/*Num of frags in a tso seg cannot be less than 2 */
702*5113495bSYour Name 		if (num_frags < 1) {
703*5113495bSYour Name 			qdf_print("ERROR: num of frags in tso segment is %d\n",
704*5113495bSYour Name 				  (num_frags + 1));
705*5113495bSYour Name 			qdf_nbuf_free(netbuf);
706*5113495bSYour Name 			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
707*5113495bSYour Name 			return;
708*5113495bSYour Name 		}
709*5113495bSYour Name 
710*5113495bSYour Name 		tcp_seq_num = tso_seg->seg.tso_flags.tcp_seq_num;
711*5113495bSYour Name 		tcp_seq_num = qdf_cpu_to_be32(tcp_seq_num);
712*5113495bSYour Name 
713*5113495bSYour Name 		ip_len = tso_seg->seg.tso_flags.ip_len;
714*5113495bSYour Name 		ip_len = qdf_cpu_to_be16(ip_len);
715*5113495bSYour Name 
716*5113495bSYour Name 		for (frag_cnt = 0; frag_cnt <= num_frags; frag_cnt++) {
717*5113495bSYour Name 			qdf_mem_copy(qdf_nbuf_data(netbuf) + frag_len,
718*5113495bSYour Name 				     tso_seg->seg.tso_frags[frag_cnt].vaddr,
719*5113495bSYour Name 				     tso_seg->seg.tso_frags[frag_cnt].length);
720*5113495bSYour Name 			frag_len += tso_seg->seg.tso_frags[frag_cnt].length;
721*5113495bSYour Name 		}
722*5113495bSYour Name 
723*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
724*5113495bSYour Name 
725*5113495bSYour Name 		qdf_mem_copy((qdf_nbuf_data(netbuf) + IPV4_PKT_LEN_OFFSET),
726*5113495bSYour Name 			     &ip_len, sizeof(ip_len));
727*5113495bSYour Name 		qdf_mem_copy((qdf_nbuf_data(netbuf) + IPV4_TCP_SEQ_NUM_OFFSET),
728*5113495bSYour Name 			     &tcp_seq_num, sizeof(tcp_seq_num));
729*5113495bSYour Name 	} else {
730*5113495bSYour Name 		qdf_mem_copy(qdf_nbuf_data(netbuf),
731*5113495bSYour Name 			     qdf_nbuf_data(tx_desc->netbuf),
732*5113495bSYour Name 			     nbuf_len);
733*5113495bSYour Name 	}
734*5113495bSYour Name 
735*5113495bSYour Name 	qdf_nbuf_push_head(
736*5113495bSYour Name 			netbuf,
737*5113495bSYour Name 			sizeof(struct htt_tx_data_hdr_information));
738*5113495bSYour Name 	qdf_mem_copy(qdf_nbuf_data(netbuf), payload_hdr,
739*5113495bSYour Name 		     sizeof(struct htt_tx_data_hdr_information));
740*5113495bSYour Name 
741*5113495bSYour Name 	ucfg_pkt_capture_tx_completion_process(
742*5113495bSYour Name 				tx_desc->vdev_id,
743*5113495bSYour Name 				netbuf, pkt_type,
744*5113495bSYour Name 				tid, status,
745*5113495bSYour Name 				TXRX_PKTCAPTURE_PKT_FORMAT_8023, bssid,
746*5113495bSYour Name 				pdev->htt_pdev, payload_hdr->tx_retry_cnt);
747*5113495bSYour Name }
748*5113495bSYour Name #else
749*5113495bSYour Name static void
ol_tx_pkt_capture_tx_completion_process(ol_txrx_pdev_handle pdev,struct ol_tx_desc_t * tx_desc,struct htt_tx_data_hdr_information * payload_hdr,uint8_t tid,uint8_t status)750*5113495bSYour Name ol_tx_pkt_capture_tx_completion_process(
751*5113495bSYour Name 			ol_txrx_pdev_handle pdev,
752*5113495bSYour Name 			struct ol_tx_desc_t *tx_desc,
753*5113495bSYour Name 			struct htt_tx_data_hdr_information *payload_hdr,
754*5113495bSYour Name 			uint8_t tid, uint8_t status)
755*5113495bSYour Name {
756*5113495bSYour Name }
757*5113495bSYour Name #endif /* WLAN_FEATURE_PKT_CAPTURE */
758*5113495bSYour Name 
759*5113495bSYour Name #ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
/**
 * ol_tx_get_txtstamps() - locate the appended tx timestamp list in an HTT
 * tx completion indication
 * @msg_word_header: first 32-bit word of the completion message
 * @msg_word_payload: in/out cursor into the message payload; on return it
 *	is advanced past the MSDU id array (and any retry list)
 * @num_msdus: number of MSDU ids carried by the message
 *
 * Return: pointer to the appended timestamp list, or NULL when the message
 * carries no timestamps (or @num_msdus is not positive).
 */
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
		int num_msdus)
{
	u_int32_t has_tx_tsf;
	u_int32_t has_retry;

	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
	struct htt_tx_compl_ind_append_retries *retry_list = NULL;
	int offset_dwords;

	if (num_msdus <= 0)
		return NULL;

	has_tx_tsf = HTT_TX_COMPL_IND_APPEND1_GET(*msg_word_header);

	/* skip header and MSDUx ID part*/
	offset_dwords = ((num_msdus + 1) >> 1);
	*msg_word_payload += offset_dwords;

	if (!has_tx_tsf)
		return NULL;

	has_retry = HTT_TX_COMPL_IND_APPEND_GET(*msg_word_header);
	if (has_retry) {
		int retry_index = 0;
		/* dword width of one retry record, rounded up */
		int width_for_each_retry =
			(sizeof(struct htt_tx_compl_ind_append_retries) +
			3) >> 2;

		/*
		 * NOTE(review): *msg_word_payload was already advanced by
		 * offset_dwords above, and offset_dwords is added again here
		 * — verify against the HTT completion layout.
		 */
		retry_list = (struct htt_tx_compl_ind_append_retries *)
			(*msg_word_payload + offset_dwords);
		/*
		 * NOTE(review): the loop condition is constant-true (the
		 * pointer never changes); termination relies solely on a
		 * record with flag == 0 being present, and the terminating
		 * record is counted in retry_index.
		 */
		while (retry_list) {
			if (retry_list[retry_index++].flag == 0)
				break;
		}
		offset_dwords = retry_index * width_for_each_retry;
	}

	*msg_word_payload +=  offset_dwords;
	txtstamp_list = (struct htt_tx_compl_ind_append_tx_tstamp *)
		(*msg_word_payload);
	return txtstamp_list;
}
804*5113495bSYour Name 
805*5113495bSYour Name static inline
ol_tx_get_txtstamp64s(u_int32_t * msg_word_header,u_int32_t ** msg_word_payload,int num_msdus)806*5113495bSYour Name struct htt_tx_compl_ind_append_tx_tsf64 *ol_tx_get_txtstamp64s(
807*5113495bSYour Name 		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
808*5113495bSYour Name 		int num_msdus)
809*5113495bSYour Name {
810*5113495bSYour Name 	u_int32_t has_tx_tstamp64;
811*5113495bSYour Name 	u_int32_t has_rssi;
812*5113495bSYour Name 	struct htt_tx_compl_ind_append_tx_tsf64 *txtstamp64_list = NULL;
813*5113495bSYour Name 
814*5113495bSYour Name 	int offset_dwords = 0;
815*5113495bSYour Name 
816*5113495bSYour Name 	if (num_msdus <= 0)
817*5113495bSYour Name 		return NULL;
818*5113495bSYour Name 
819*5113495bSYour Name 	has_tx_tstamp64 = HTT_TX_COMPL_IND_APPEND3_GET(*msg_word_header);
820*5113495bSYour Name 	if (!has_tx_tstamp64)
821*5113495bSYour Name 		return NULL;
822*5113495bSYour Name 
823*5113495bSYour Name 	/*skip MSDUx ACK RSSI part*/
824*5113495bSYour Name 	has_rssi = HTT_TX_COMPL_IND_APPEND2_GET(*msg_word_header);
825*5113495bSYour Name 	if (has_rssi)
826*5113495bSYour Name 		offset_dwords = ((num_msdus + 1) >> 1);
827*5113495bSYour Name 
828*5113495bSYour Name 	*msg_word_payload = *msg_word_payload + offset_dwords;
829*5113495bSYour Name 	txtstamp64_list =
830*5113495bSYour Name 		(struct htt_tx_compl_ind_append_tx_tsf64 *)
831*5113495bSYour Name 		(*msg_word_payload);
832*5113495bSYour Name 
833*5113495bSYour Name 	return txtstamp64_list;
834*5113495bSYour Name }
835*5113495bSYour Name 
/**
 * ol_tx_timestamp() - report a tx completion timestamp to the registered
 * callback
 * @pdev: txrx pdev holding the optional ol_tx_timestamp_cb
 * @status: tx completion status for the frame
 * @netbuf: completed frame (may be NULL, in which case nothing is done)
 * @ts: completion timestamp value
 */
static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
				   enum htt_tx_status status,
				   qdf_nbuf_t netbuf, u_int64_t ts)
{
	if (netbuf && pdev->ol_tx_timestamp_cb)
		pdev->ol_tx_timestamp_cb(status, netbuf, ts);
}
846*5113495bSYour Name #else
/*
 * WLAN_FEATURE_TSF_PLUS_SOCK_TS disabled: no timestamp lists are parsed
 * and no timestamp callback is invoked — all three helpers are no-ops.
 */
static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
		int num_msdus)
{
	return NULL;
}

static inline
struct htt_tx_compl_ind_append_tx_tsf64 *ol_tx_get_txtstamp64s(
		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
		int num_msdus)
{
	return NULL;
}

static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
				   enum htt_tx_status status,
				   qdf_nbuf_t netbuf, u_int64_t ts)
{
}
867*5113495bSYour Name #endif /* WLAN_FEATURE_TSF_PLUS_SOCK_TS */
868*5113495bSYour Name 
ol_tx_update_ack_count(struct ol_tx_desc_t * tx_desc,enum htt_tx_status status)869*5113495bSYour Name static void ol_tx_update_ack_count(struct ol_tx_desc_t *tx_desc,
870*5113495bSYour Name 				   enum htt_tx_status status)
871*5113495bSYour Name {
872*5113495bSYour Name 	if (!tx_desc->vdev)
873*5113495bSYour Name 		return;
874*5113495bSYour Name 
875*5113495bSYour Name 	if (status == htt_tx_status_ok)
876*5113495bSYour Name 		++tx_desc->vdev->txrx_stats.txack_success;
877*5113495bSYour Name 	else
878*5113495bSYour Name 		++tx_desc->vdev->txrx_stats.txack_failed;
879*5113495bSYour Name }
880*5113495bSYour Name 
881*5113495bSYour Name /**
882*5113495bSYour Name  * ol_tx_notify_completion() - Notify tx completion for this desc
883*5113495bSYour Name  * @tx_desc: tx desc
884*5113495bSYour Name  * @netbuf:  buffer
885*5113495bSYour Name  * @status: tx status
886*5113495bSYour Name  *
887*5113495bSYour Name  * Return: none
888*5113495bSYour Name  */
/**
 * ol_tx_notify_completion() - Notify tx completion for this desc
 * @tx_desc: tx desc
 * @netbuf:  buffer
 * @status: tx status
 *
 * Snapshots the vdev's completion callback under the flow pool lock (the
 * vdev may be torn down while completions are in flight) and invokes it
 * with a status bitmap derived from @status.
 *
 * Return: none
 */
static void ol_tx_notify_completion(struct ol_tx_desc_t *tx_desc,
				    qdf_nbuf_t netbuf, uint8_t status)
{
	ol_txrx_completion_fp tx_compl_cbk;
	void *osif_dev;
	uint16_t flag;

	qdf_assert(tx_desc);

	/* snapshot vdev fields under the lock; bail out if vdev is gone */
	ol_tx_flow_pool_lock(tx_desc);
	if (!tx_desc->vdev || !tx_desc->vdev->osif_dev) {
		ol_tx_flow_pool_unlock(tx_desc);
		return;
	}
	osif_dev = tx_desc->vdev->osif_dev;
	tx_compl_cbk = tx_desc->vdev->tx_comp;
	ol_tx_flow_pool_unlock(tx_desc);

	/*
	 * Download succeeded unless the status says otherwise; over-the-air
	 * success is additionally flagged for htt_tx_status_ok.
	 */
	if (status == htt_tx_status_ok)
		flag = BIT(QDF_TX_RX_STATUS_OK) |
		       BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
	else if (status != htt_tx_status_download_fail)
		flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
	else
		flag = 0;

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev, flag);
}
918*5113495bSYour Name 
919*5113495bSYour Name /**
920*5113495bSYour Name  * ol_tx_update_connectivity_stats() - update connectivity stats
921*5113495bSYour Name  * @tx_desc: tx desc
922*5113495bSYour Name  * @netbuf:  buffer
923*5113495bSYour Name  * @status: htt status
924*5113495bSYour Name  *
925*5113495bSYour Name  *
926*5113495bSYour Name  * Return: none
927*5113495bSYour Name  */
/**
 * ol_tx_update_connectivity_stats() - update connectivity stats
 * @tx_desc: tx desc
 * @netbuf:  buffer
 * @status: htt status
 *
 * Feeds the vdev's stats_rx callback with HOST_FW_SENT / ACK_CNT events
 * derived from the completion status, when connectivity-stats tracking
 * is enabled for this interface.
 *
 * Return: none
 */
static void ol_tx_update_connectivity_stats(struct ol_tx_desc_t *tx_desc,
					    qdf_nbuf_t netbuf,
					    enum htt_tx_status status)
{
	uint32_t pkt_type_bitmap;
	ol_txrx_stats_rx_fp stats_rx;
	void *osif_dev;
	uint8_t pkt_type = 0;

	qdf_assert(tx_desc);

	/*
	 * The vdev may already have gone down when completions arrive;
	 * snapshot the fields we need under the flow pool lock.
	 */
	ol_tx_flow_pool_lock(tx_desc);
	if (!tx_desc->vdev ||
	    !tx_desc->vdev->osif_dev ||
	    !tx_desc->vdev->stats_rx) {
		ol_tx_flow_pool_unlock(tx_desc);
		return;
	}
	osif_dev = tx_desc->vdev->osif_dev;
	stats_rx = tx_desc->vdev->stats_rx;
	ol_tx_flow_pool_unlock(tx_desc);

	/* NOTE(review): vdev is dereferenced here outside the lock */
	pkt_type_bitmap = wlan_dp_intf_get_pkt_type_bitmap_value(tx_desc->vdev);
	if (!pkt_type_bitmap)
		return;

	if (status != htt_tx_status_download_fail)
		stats_rx(netbuf, osif_dev,
			 PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
	if (status == htt_tx_status_ok)
		stats_rx(netbuf, osif_dev,
			 PKT_TYPE_TX_ACK_CNT, &pkt_type);
}
966*5113495bSYour Name 
967*5113495bSYour Name #ifdef CONNECTIVITY_PKTLOG
968*5113495bSYour Name static inline enum qdf_dp_tx_rx_status
htt_qdf_status_map(enum htt_tx_status status)969*5113495bSYour Name htt_qdf_status_map(enum htt_tx_status status)
970*5113495bSYour Name {
971*5113495bSYour Name 	switch (status) {
972*5113495bSYour Name 	case HTT_TX_COMPL_IND_STAT_OK:
973*5113495bSYour Name 		return QDF_TX_RX_STATUS_OK;
974*5113495bSYour Name 	case HTT_TX_COMPL_IND_STAT_DISCARD:
975*5113495bSYour Name 		return QDF_TX_RX_STATUS_FW_DISCARD;
976*5113495bSYour Name 	case HTT_TX_COMPL_IND_STAT_NO_ACK:
977*5113495bSYour Name 		return QDF_TX_RX_STATUS_NO_ACK;
978*5113495bSYour Name 	case HTT_TX_COMPL_IND_STAT_DROP:
979*5113495bSYour Name 		return QDF_TX_RX_STATUS_DROP;
980*5113495bSYour Name 	case HTT_HOST_ONLY_STATUS_CODE_START:
981*5113495bSYour Name 		return QDF_TX_RX_STATUS_DROP;
982*5113495bSYour Name 	default:
983*5113495bSYour Name 		return QDF_TX_RX_STATUS_DROP;
984*5113495bSYour Name 	}
985*5113495bSYour Name }
986*5113495bSYour Name 
987*5113495bSYour Name static inline void
ol_tx_send_pktlog(struct ol_txrx_soc_t * soc,ol_txrx_pdev_handle pdev,struct ol_tx_desc_t * tx_desc,qdf_nbuf_t netbuf,enum htt_tx_status status,enum qdf_pkt_type pkt_type)988*5113495bSYour Name ol_tx_send_pktlog(struct ol_txrx_soc_t *soc, ol_txrx_pdev_handle pdev,
989*5113495bSYour Name 		  struct ol_tx_desc_t *tx_desc, qdf_nbuf_t netbuf,
990*5113495bSYour Name 		  enum htt_tx_status status, enum qdf_pkt_type pkt_type)
991*5113495bSYour Name {
992*5113495bSYour Name 	ol_txrx_pktdump_cb packetdump_cb;
993*5113495bSYour Name 	enum qdf_dp_tx_rx_status tx_status;
994*5113495bSYour Name 
995*5113495bSYour Name 	if (tx_desc->pkt_type != OL_TX_FRM_TSO) {
996*5113495bSYour Name 		packetdump_cb = pdev->ol_tx_packetdump_cb;
997*5113495bSYour Name 		if (packetdump_cb) {
998*5113495bSYour Name 			tx_status = htt_qdf_status_map(status);
999*5113495bSYour Name 			packetdump_cb((void *)soc, pdev->id,
1000*5113495bSYour Name 				      tx_desc->vdev_id,
1001*5113495bSYour Name 				      netbuf, tx_status, pkt_type);
1002*5113495bSYour Name 		}
1003*5113495bSYour Name 	}
1004*5113495bSYour Name }
1005*5113495bSYour Name #else
1006*5113495bSYour Name static inline void
ol_tx_send_pktlog(struct ol_txrx_soc_t * soc,ol_txrx_pdev_handle pdev,struct ol_tx_desc_t * tx_desc,qdf_nbuf_t netbuf,enum htt_tx_status status,enum qdf_pkt_type pkt_type)1007*5113495bSYour Name ol_tx_send_pktlog(struct ol_txrx_soc_t *soc, ol_txrx_pdev_handle pdev,
1008*5113495bSYour Name 		  struct ol_tx_desc_t *tx_desc, qdf_nbuf_t netbuf,
1009*5113495bSYour Name 		  enum htt_tx_status status, enum qdf_pkt_type pkt_type)
1010*5113495bSYour Name {
1011*5113495bSYour Name }
1012*5113495bSYour Name #endif
1013*5113495bSYour Name 
1014*5113495bSYour Name /**
1015*5113495bSYour Name  * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
1016*5113495bSYour Name  * ol_tx_completion_handler().
1017*5113495bSYour Name  * any change in ol_tx_completion_handler() must be mirrored in
1018*5113495bSYour Name  * ol_tx_inspect_handler().
1019*5113495bSYour Name  */
1020*5113495bSYour Name void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,int num_msdus,enum htt_tx_status status,void * msg)1021*5113495bSYour Name ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
1022*5113495bSYour Name 			 int num_msdus,
1023*5113495bSYour Name 			 enum htt_tx_status status, void *msg)
1024*5113495bSYour Name {
1025*5113495bSYour Name 	int i;
1026*5113495bSYour Name 	uint16_t tx_desc_id;
1027*5113495bSYour Name 	struct ol_tx_desc_t *tx_desc;
1028*5113495bSYour Name 	uint32_t byte_cnt = 0;
1029*5113495bSYour Name 	qdf_nbuf_t netbuf;
1030*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1031*5113495bSYour Name 	uint32_t is_tx_desc_freed = 0;
1032*5113495bSYour Name 	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
1033*5113495bSYour Name 	struct htt_tx_compl_ind_append_tx_tsf64 *txtstamp64_list = NULL;
1034*5113495bSYour Name 	struct htt_tx_data_hdr_information *pkt_capture_txcomp_hdr_list = NULL;
1035*5113495bSYour Name 	u_int32_t *msg_word_header = (u_int32_t *)msg;
1036*5113495bSYour Name 	/*msg_word skip header*/
1037*5113495bSYour Name 	u_int32_t *msg_word_payload = msg_word_header + 1;
1038*5113495bSYour Name 	u_int32_t *msg_word = (u_int32_t *)msg;
1039*5113495bSYour Name 	u_int16_t *desc_ids = (u_int16_t *)(msg_word + 1);
1040*5113495bSYour Name 	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
1041*5113495bSYour Name 	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
1042*5113495bSYour Name 	ol_tx_desc_list tx_descs;
1043*5113495bSYour Name 	uint64_t tx_tsf64;
1044*5113495bSYour Name 	uint8_t tid;
1045*5113495bSYour Name 	uint8_t dp_status;
1046*5113495bSYour Name 
1047*5113495bSYour Name 	TAILQ_INIT(&tx_descs);
1048*5113495bSYour Name 
1049*5113495bSYour Name 	tid = HTT_TX_COMPL_IND_TID_GET(*msg_word);
1050*5113495bSYour Name 
1051*5113495bSYour Name 	ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);
1052*5113495bSYour Name 	if (status == htt_tx_status_ok ||
1053*5113495bSYour Name 	    status == htt_tx_status_discard ||
1054*5113495bSYour Name 	    status == htt_tx_status_no_ack) {
1055*5113495bSYour Name 		txtstamp_list = ol_tx_get_txtstamps(
1056*5113495bSYour Name 			msg_word_header, &msg_word_payload, num_msdus);
1057*5113495bSYour Name 		if (pdev->enable_tx_compl_tsf64)
1058*5113495bSYour Name 			txtstamp64_list = ol_tx_get_txtstamp64s(
1059*5113495bSYour Name 				msg_word_header, &msg_word_payload, num_msdus);
1060*5113495bSYour Name 	}
1061*5113495bSYour Name 
1062*5113495bSYour Name 	if ((ucfg_pkt_capture_get_mode((void *)soc->psoc) ==
1063*5113495bSYour Name 						PACKET_CAPTURE_MODE_DATA_ONLY))
1064*5113495bSYour Name 		pkt_capture_txcomp_hdr_list =
1065*5113495bSYour Name 				ucfg_pkt_capture_tx_get_txcomplete_data_hdr(
1066*5113495bSYour Name 								msg_word,
1067*5113495bSYour Name 								num_msdus);
1068*5113495bSYour Name 
1069*5113495bSYour Name 	for (i = 0; i < num_msdus; i++) {
1070*5113495bSYour Name 		tx_desc_id = desc_ids[i];
1071*5113495bSYour Name 		if (tx_desc_id >= pdev->tx_desc.pool_size) {
1072*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1073*5113495bSYour Name 			"%s: drop due to invalid msdu id = %x\n",
1074*5113495bSYour Name 			__func__, tx_desc_id);
1075*5113495bSYour Name 			continue;
1076*5113495bSYour Name 		}
1077*5113495bSYour Name 		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
1078*5113495bSYour Name 		qdf_assert(tx_desc);
1079*5113495bSYour Name 		ol_tx_desc_update_comp_ts(tx_desc);
1080*5113495bSYour Name 		tx_desc->status = status;
1081*5113495bSYour Name 		netbuf = tx_desc->netbuf;
1082*5113495bSYour Name 
1083*5113495bSYour Name 		if (txtstamp64_list) {
1084*5113495bSYour Name 			tx_tsf64 =
1085*5113495bSYour Name 			(u_int64_t)txtstamp64_list[i].tx_tsf64_high << 32 |
1086*5113495bSYour Name 			txtstamp64_list[i].tx_tsf64_low;
1087*5113495bSYour Name 
1088*5113495bSYour Name 			ol_tx_timestamp(pdev, status,  netbuf, tx_tsf64);
1089*5113495bSYour Name 		} else if (txtstamp_list)
1090*5113495bSYour Name 			ol_tx_timestamp(pdev, status, netbuf,
1091*5113495bSYour Name 					(u_int64_t)txtstamp_list->timestamp[i]
1092*5113495bSYour Name 					);
1093*5113495bSYour Name 
1094*5113495bSYour Name 		if (pkt_capture_txcomp_hdr_list) {
1095*5113495bSYour Name 			ol_tx_pkt_capture_tx_completion_process(
1096*5113495bSYour Name 						pdev,
1097*5113495bSYour Name 						tx_desc,
1098*5113495bSYour Name 						&pkt_capture_txcomp_hdr_list[i],
1099*5113495bSYour Name 						tid, status);
1100*5113495bSYour Name 		}
1101*5113495bSYour Name 
1102*5113495bSYour Name 		QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
1103*5113495bSYour Name 
1104*5113495bSYour Name 		/* check tx completion notification */
1105*5113495bSYour Name 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
1106*5113495bSYour Name 			ol_tx_notify_completion(tx_desc, netbuf, status);
1107*5113495bSYour Name 
1108*5113495bSYour Name 		/* track connectivity stats */
1109*5113495bSYour Name 		ol_tx_update_connectivity_stats(tx_desc, netbuf,
1110*5113495bSYour Name 						status);
1111*5113495bSYour Name 		ol_tx_update_ack_count(tx_desc, status);
1112*5113495bSYour Name 
1113*5113495bSYour Name 		ol_tx_send_pktlog(soc, pdev, tx_desc, netbuf, status,
1114*5113495bSYour Name 				  QDF_TX_DATA_PKT);
1115*5113495bSYour Name 
1116*5113495bSYour Name 		dp_status = ol_tx_comp_hw_to_qdf_status(status);
1117*5113495bSYour Name 
1118*5113495bSYour Name 		DPTRACE(qdf_dp_trace_ptr(netbuf,
1119*5113495bSYour Name 			QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
1120*5113495bSYour Name 			QDF_TRACE_DEFAULT_PDEV_ID,
1121*5113495bSYour Name 			qdf_nbuf_data_addr(netbuf),
1122*5113495bSYour Name 			sizeof(qdf_nbuf_data(netbuf)), tx_desc->id, status,
1123*5113495bSYour Name 			dp_status,
1124*5113495bSYour Name 			tx_desc->vdev->qdf_opmode));
1125*5113495bSYour Name 
1126*5113495bSYour Name 		/*
1127*5113495bSYour Name 		 * If credits are reported through credit_update_ind then do not
1128*5113495bSYour Name 		 * update group credits on tx_complete_ind.
1129*5113495bSYour Name 		 */
1130*5113495bSYour Name 		if (!pdev->cfg.credit_update_enabled)
1131*5113495bSYour Name 			ol_tx_desc_update_group_credit(pdev,
1132*5113495bSYour Name 						       tx_desc_id,
1133*5113495bSYour Name 						       1, 0, status);
1134*5113495bSYour Name 		/* Per SDU update of byte count */
1135*5113495bSYour Name 		byte_cnt += qdf_nbuf_len(netbuf);
1136*5113495bSYour Name 		if (OL_TX_DESC_NO_REFS(tx_desc)) {
1137*5113495bSYour Name 			ol_tx_statistics(
1138*5113495bSYour Name 				pdev->ctrl_pdev,
1139*5113495bSYour Name 				HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
1140*5113495bSYour Name 							  (tx_desc->
1141*5113495bSYour Name 							   htt_tx_desc))),
1142*5113495bSYour Name 				status != htt_tx_status_ok);
1143*5113495bSYour Name 			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
1144*5113495bSYour Name 					    lcl_freelist, tx_desc_last, status,
1145*5113495bSYour Name 					    is_tx_desc_freed);
1146*5113495bSYour Name 
1147*5113495bSYour Name #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
1148*5113495bSYour Name 			if (!is_tx_desc_freed) {
1149*5113495bSYour Name 				tx_desc->pkt_type = ol_tx_frm_freed;
1150*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY
1151*5113495bSYour Name 				tx_desc->entry_timestamp_ticks = 0xffffffff;
1152*5113495bSYour Name #endif
1153*5113495bSYour Name 			}
1154*5113495bSYour Name #endif
1155*5113495bSYour Name 		}
1156*5113495bSYour Name 	}
1157*5113495bSYour Name 
1158*5113495bSYour Name 	/* One shot protected access to pdev freelist, when setup */
1159*5113495bSYour Name 	if (lcl_freelist) {
1160*5113495bSYour Name 		qdf_spin_lock(&pdev->tx_mutex);
1161*5113495bSYour Name 		tx_desc_last->next = pdev->tx_desc.freelist;
1162*5113495bSYour Name 		pdev->tx_desc.freelist = lcl_freelist;
1163*5113495bSYour Name 		pdev->tx_desc.num_free += (uint16_t) num_msdus;
1164*5113495bSYour Name 		qdf_spin_unlock(&pdev->tx_mutex);
1165*5113495bSYour Name 	} else {
1166*5113495bSYour Name 		ol_tx_desc_frame_list_free(pdev, &tx_descs,
1167*5113495bSYour Name 					   status != htt_tx_status_ok);
1168*5113495bSYour Name 	}
1169*5113495bSYour Name 
1170*5113495bSYour Name 	if (pdev->cfg.is_high_latency) {
1171*5113495bSYour Name 		/*
1172*5113495bSYour Name 		 * Credit was already explicitly updated by HTT,
1173*5113495bSYour Name 		 * but update the number of available tx descriptors,
1174*5113495bSYour Name 		 * then invoke the scheduler, since new credit is probably
1175*5113495bSYour Name 		 * available now.
1176*5113495bSYour Name 		 */
1177*5113495bSYour Name 		qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
1178*5113495bSYour Name 		ol_tx_sched(pdev);
1179*5113495bSYour Name 	} else {
1180*5113495bSYour Name 		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
1181*5113495bSYour Name 	}
1182*5113495bSYour Name 
1183*5113495bSYour Name 	/* UNPAUSE OS Q */
1184*5113495bSYour Name 	ol_tx_flow_ct_unpause_os_q(pdev);
1185*5113495bSYour Name 	/* Do one shot statistics */
1186*5113495bSYour Name 	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
1187*5113495bSYour Name }
1188*5113495bSYour Name 
1189*5113495bSYour Name #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
1190*5113495bSYour Name 
/**
 * ol_tx_desc_update_group_credit() - apply a credit update to the tx queue
 *	group that owns the vdev of the given tx descriptor
 * @pdev: data path pdev handle
 * @tx_desc_id: id of the tx descriptor whose vdev selects the group
 * @credit: credit delta (or absolute value) to apply
 * @absolute: if non-zero, @credit replaces the group credit rather than
 *	adjusting it
 * @status: htt completion status (not consulted in this path)
 *
 * Only the first group whose membership mask contains the descriptor's
 * vdev id is updated; the group credit stats log is refreshed afterwards.
 */
void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
		u_int16_t tx_desc_id, int credit, u_int8_t absolute,
		enum htt_tx_status status)
{
	struct ol_tx_desc_t *tx_desc;
	uint8_t grp_idx;

	if (tx_desc_id >= pdev->tx_desc.pool_size) {
		qdf_print("Invalid desc id");
		return;
	}

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);

	for (grp_idx = 0; grp_idx < OL_TX_MAX_TXQ_GROUPS; grp_idx++) {
		uint16_t vdev_mask;

		vdev_mask = OL_TXQ_GROUP_VDEV_ID_MASK_GET(
				pdev->txq_grps[grp_idx].membership);
		if (OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_mask,
						      tx_desc->vdev_id)) {
			ol_txrx_update_group_credit(&pdev->txq_grps[grp_idx],
						    credit, absolute);
			break;
		}
	}

	ol_tx_update_group_credit_stats(pdev);
}
1219*5113495bSYour Name 
/**
 * ol_tx_deduct_one_any_group_credit() - take one credit from whichever
 *	tx queue group can best afford it
 * @pdev: data path pdev handle
 *
 * Deducts from group 0 when it has strictly more credit than group 1;
 * otherwise deducts from group 1, provided group 1 has any credit left.
 * If both groups are at zero, nothing is deducted.
 */
void ol_tx_deduct_one_any_group_credit(ol_txrx_pdev_handle pdev)
{
	int grp0_credit;
	int grp1_credit;

	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);

	grp0_credit = qdf_atomic_read(&pdev->txq_grps[0].credit);
	grp1_credit = qdf_atomic_read(&pdev->txq_grps[1].credit);

	if (grp0_credit > grp1_credit)
		ol_txrx_update_group_credit(&pdev->txq_grps[0], -1, 0);
	else if (grp1_credit != 0)
		ol_txrx_update_group_credit(&pdev->txq_grps[1], -1, 0);

	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
}
1235*5113495bSYour Name 
1236*5113495bSYour Name #ifdef DEBUG_HL_LOGGING
1237*5113495bSYour Name 
/**
 * ol_tx_update_group_credit_stats() - append a snapshot of every group's
 *	membership mask and credit to the circular stats log
 * @pdev: data path pdev handle
 */
void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	uint16_t log_idx;
	uint8_t grp;

	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);

	/* advance the circular-log cursor, flagging wrap-around */
	pdev->grp_stats.last_valid_index++;
	if (pdev->grp_stats.last_valid_index >
	    (OL_TX_GROUP_STATS_LOG_SIZE - 1)) {
		pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
		pdev->grp_stats.wrap_around = 1;
	}
	log_idx = pdev->grp_stats.last_valid_index;

	/* record the current membership and credit of each group */
	for (grp = 0; grp < OL_TX_MAX_TXQ_GROUPS; grp++) {
		pdev->grp_stats.stats[log_idx].grp[grp].member_vdevs =
			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
					pdev->txq_grps[grp].membership);
		pdev->grp_stats.stats[log_idx].grp[grp].credit =
			qdf_atomic_read(&pdev->txq_grps[grp].credit);
	}

	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}
1262*5113495bSYour Name 
/**
 * ol_tx_dump_group_credit_stats() - print the group credit stats log,
 *	newest entry first
 * @pdev: data path pdev handle
 *
 * Walks backwards from the most recent log entry. The per-entry "Change"
 * column is the credit delta vs. the previous (older) entry; for the
 * oldest available entry there is no predecessor, so "NA" is printed and
 * the walk stops.
 */
void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	uint16_t i, j, is_break = 0;
	int16_t curr_index, old_index, wrap_around;
	uint16_t curr_credit, mem_vdevs;
	uint16_t old_credit = 0;

	txrx_nofl_info("Group credit stats:");
	txrx_nofl_info("  No: GrpID: Credit: Change: vdev_map");

	/* snapshot the cursor/wrap state; entries are re-read per group */
	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	curr_index = pdev->grp_stats.last_valid_index;
	wrap_around = pdev->grp_stats.wrap_around;
	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

	/* -1 means the log has never been written (see clear/init) */
	if (curr_index < 0) {
		txrx_nofl_info("Not initialized");
		return;
	}

	for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
		old_index = curr_index - 1;
		if (old_index < 0) {
			/*
			 * Reached the start of the buffer: either stop after
			 * printing this (oldest) entry, or wrap to the end
			 * if the log has wrapped around.
			 */
			if (wrap_around == 0)
				is_break = 1;
			else
				old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
		}

		for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
			/* lock only around the reads, not the printing */
			qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
			curr_credit =
				pdev->grp_stats.stats[curr_index].
								grp[j].credit;
			if (!is_break)
				old_credit =
					pdev->grp_stats.stats[old_index].
								grp[j].credit;

			mem_vdevs =
				pdev->grp_stats.stats[curr_index].grp[j].
								member_vdevs;
			qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);

			if (!is_break)
				txrx_nofl_info("%4d: %5d: %6d %6d %8x",
					       curr_index, j,
					       curr_credit,
					       (curr_credit - old_credit),
					       mem_vdevs);
			else
				txrx_nofl_info("%4d: %5d: %6d %6s %8x",
					       curr_index, j,
					       curr_credit, "NA", mem_vdevs);
		}

		if (is_break)
			break;

		curr_index = old_index;
	}
}
1325*5113495bSYour Name 
/**
 * ol_tx_clear_group_credit_stats() - wipe the group credit stats log
 * @pdev: data path pdev handle
 */
void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
{
	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
	qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
	/* -1 marks the log as empty, so dumps report "Not initialized" */
	pdev->grp_stats.last_valid_index = -1;
	pdev->grp_stats.wrap_around = 0;
	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
}
1334*5113495bSYour Name #endif
1335*5113495bSYour Name #endif
1336*5113495bSYour Name 
1337*5113495bSYour Name /*
1338*5113495bSYour Name  * ol_tx_single_completion_handler performs the same tx completion
1339*5113495bSYour Name  * processing as ol_tx_completion_handler, but for a single frame.
1340*5113495bSYour Name  * ol_tx_completion_handler is optimized to handle batch completions
1341*5113495bSYour Name  * as efficiently as possible; in contrast ol_tx_single_completion_handler
1342*5113495bSYour Name  * handles single frames as simply and generally as possible.
1343*5113495bSYour Name  * Thus, this ol_tx_single_completion_handler function is suitable for
1344*5113495bSYour Name  * intermittent usage, such as for tx mgmt frames.
1345*5113495bSYour Name  */
/**
 * ol_tx_single_completion_handler() - tx completion processing for one
 *	frame (e.g. a tx mgmt frame)
 * @pdev: data path pdev handle
 * @status: htt tx completion status from the target
 * @tx_desc_id: id of the completed tx descriptor
 *
 * Updates stats and pktlog, frees the frame once no references remain,
 * and replenishes one unit of tx credit / descriptor resource.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
				enum htt_tx_status status, uint16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;
	qdf_nbuf_t netbuf;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
	if (!tx_desc) {
		ol_txrx_err("invalid desc_id(%u), ignore it", tx_desc_id);
		return;
	}

	tx_desc->status = status;
	netbuf = tx_desc->netbuf;

	QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));

	ol_tx_send_pktlog(soc, pdev, tx_desc, netbuf, status, QDF_TX_MGMT_PKT);

	/* free the frame only when no references to the descriptor remain */
	if (OL_TX_DESC_NO_REFS(tx_desc)) {
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     status != htt_tx_status_ok);
	}

	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      1, qdf_atomic_read(&pdev->target_tx_credit) + 1);

	if (pdev->cfg.is_high_latency) {
		/*
		 * Credit was already explicitly updated by HTT,
		 * but update the number of available tx descriptors,
		 * then invoke the scheduler, since new credit is probably
		 * available now.
		 */
		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_sched(pdev);
	} else {
		/* low latency: return the credit directly */
		qdf_atomic_add(1, &pdev->target_tx_credit);
	}
}
1391*5113495bSYour Name 
1392*5113495bSYour Name /**
1393*5113495bSYour Name  * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
1394*5113495bSYour Name  * ol_tx_completion_handler().
1395*5113495bSYour Name  * any change in ol_tx_completion_handler() must be mirrored here.
1396*5113495bSYour Name  */
/**
 * ol_tx_inspect_handler() - free "inspected" tx frames and return their
 *	descriptors and credit
 * @pdev: data path pdev handle
 * @num_msdus: number of descriptor ids in @tx_desc_id_iterator
 * @tx_desc_id_iterator: array of uint16_t tx descriptor ids
 *
 * Mirrors ol_tx_completion_handler() (see warning comment above), but
 * forces htt_tx_status_ok so the frames are freed gracefully.
 */
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
		      int num_msdus, void *tx_desc_id_iterator)
{
	uint16_t vdev_id, i;
	struct ol_txrx_vdev_t *vdev;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	qdf_nbuf_t netbuf;
	ol_tx_desc_list tx_descs;
	uint32_t is_tx_desc_freed = 0;

	TAILQ_INIT(&tx_descs);

	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		/* ignore ids outside the descriptor pool */
		if (tx_desc_id >= pdev->tx_desc.pool_size) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: drop due to invalid msdu id = %x\n",
			__func__, tx_desc_id);
			continue;
		}
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		qdf_assert(tx_desc);
		ol_tx_desc_update_comp_ts(tx_desc);
		netbuf = tx_desc->netbuf;

		/* find the "vdev" this tx_desc belongs to */
		vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
						    (tx_desc->htt_tx_desc)));
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->vdev_id == vdev_id)
				break;
		}

		/* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
		/* save this multicast packet to local free list */
		if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
		{
			/*
			 * For this function only, force htt status to be
			 * "htt_tx_status_ok"
			 * for graceful freeing of this multicast frame
			 */
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last,
					    htt_tx_status_ok,
					    is_tx_desc_freed);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
			/* mark the descriptor so stale use can be detected */
			if (!is_tx_desc_freed) {
				tx_desc->pkt_type = ol_tx_frm_freed;
#ifdef QCA_COMPUTE_TX_DELAY
				tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
			}
#endif
		}
	}

	/* one-shot protected return of locally batched descriptors */
	if (lcl_freelist) {
		qdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		qdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   htt_tx_status_discard);
	}
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
			      qdf_atomic_read(&pdev->target_tx_credit),
			      num_msdus,
			      qdf_atomic_read(&pdev->target_tx_credit) +
			      num_msdus);

	if (pdev->cfg.is_high_latency) {
		/* credit was already explicitly updated by HTT */
		ol_tx_sched(pdev);
	} else {
		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
	}
}
1484*5113495bSYour Name 
1485*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY
1486*5113495bSYour Name /**
1487*5113495bSYour Name  * ol_tx_set_compute_interval -  updates the compute interval
1488*5113495bSYour Name  *				 period for TSM stats.
1489*5113495bSYour Name  * @soc_hdl: Datapath soc handle
1490*5113495bSYour Name  * @pdev_id: id of data path pdev handle
 * @interval: interval for stats computation
1492*5113495bSYour Name  *
1493*5113495bSYour Name  * Return: None
1494*5113495bSYour Name  */
ol_tx_set_compute_interval(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,uint32_t interval)1495*5113495bSYour Name void ol_tx_set_compute_interval(struct cdp_soc_t *soc_hdl,
1496*5113495bSYour Name 				uint8_t pdev_id, uint32_t interval)
1497*5113495bSYour Name {
1498*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1499*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1500*5113495bSYour Name 
1501*5113495bSYour Name 	if (!pdev) {
1502*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
1503*5113495bSYour Name 		return;
1504*5113495bSYour Name 	}
1505*5113495bSYour Name 
1506*5113495bSYour Name 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
1507*5113495bSYour Name }
1508*5113495bSYour Name 
1509*5113495bSYour Name /**
 * ol_tx_packet_count() - Return the uplink (transmitted) packet count
 *			  and loss count.
 * @soc_hdl: soc handle
 * @pdev_id: pdev identifier
 * @out_packet_count: number of packets transmitted
 * @out_packet_loss_count: number of packets lost
 * @category: access category of interest
1517*5113495bSYour Name  *
1518*5113495bSYour Name  *  This function will be called for getting uplink packet count and
1519*5113495bSYour Name  *  loss count for given stream (access category) a regular interval.
1520*5113495bSYour Name  *  This also resets the counters hence, the value returned is packets
1521*5113495bSYour Name  *  counted in last 5(default) second interval. These counter are
1522*5113495bSYour Name  *  incremented per access category in ol_tx_completion_handler()
1523*5113495bSYour Name  */
1524*5113495bSYour Name void
ol_tx_packet_count(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,uint16_t * out_packet_count,uint16_t * out_packet_loss_count,int category)1525*5113495bSYour Name ol_tx_packet_count(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1526*5113495bSYour Name 		   uint16_t *out_packet_count,
1527*5113495bSYour Name 		   uint16_t *out_packet_loss_count, int category)
1528*5113495bSYour Name {
1529*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1530*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1531*5113495bSYour Name 
1532*5113495bSYour Name 	if (!pdev) {
1533*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
1534*5113495bSYour Name 		return;
1535*5113495bSYour Name 	}
1536*5113495bSYour Name 
1537*5113495bSYour Name 	*out_packet_count = pdev->packet_count[category];
1538*5113495bSYour Name 	*out_packet_loss_count = pdev->packet_loss_count[category];
1539*5113495bSYour Name 	pdev->packet_count[category] = 0;
1540*5113495bSYour Name 	pdev->packet_loss_count[category] = 0;
1541*5113495bSYour Name }
1542*5113495bSYour Name 
/**
 * ol_tx_delay_avg() - compute a rounded average without a 64-bit divide
 * @sum: accumulated delay total (ticks)
 * @num: number of samples accumulated into @sum
 *
 * To avoid doing a 64-bit divide, shift the sum down until it is
 * no more than 32 bits (and shift the denominator to match).
 *
 * Return: sum / num rounded to nearest. If shifting reduces @num to
 * zero (a very large sum with a small sample count, or @num == 0 on
 * entry), the scaled 32-bit sum is returned instead of performing a
 * divide-by-zero, which is undefined behavior.
 */
static uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
	uint32_t sum32;
	int shift = 0;

	while ((sum >> 32) != 0) {
		sum >>= 1;
		shift++;
	}
	sum32 = (uint32_t) sum;
	num >>= shift;
	if (num == 0)
		return sum32;	/* guard: denominator shifted to zero */
	return (sum32 + (num >> 1)) / num;      /* round to nearest */
}
1559*5113495bSYour Name 
/**
 * ol_tx_delay() - fetch the average tx queuing and transmit delays
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @queue_delay_microsec: output - average queuing delay, microseconds
 * @tx_delay_microsec: output - average transmit delay, microseconds
 * @category: access category of interest
 *
 * Reads the completed copy of the ping-pong delay record, i.e. the one
 * not currently being filled in by the completion path.
 */
void
ol_tx_delay(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	    uint32_t *queue_delay_microsec,
	    uint32_t *tx_delay_microsec, int category)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	int index;
	uint32_t avg_delay_ticks;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	/* mutex against the writer switching the ping-pong index */
	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	/* 1 - in_progress_idx selects the copy NOT being written */
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	if (data->avgs.transmit_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
					data->avgs.transmit_num);
		/* ticks scaled by 1000, then ticks->msec, to yield usec */
		*tx_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*tx_delay_microsec = 0;
	}
	if (data->avgs.queue_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.queue_sum_ticks,
					data->avgs.queue_num);
		*queue_delay_microsec =
			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*queue_delay_microsec = 0;
	}

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
1612*5113495bSYour Name 
/**
 * ol_tx_delay_hist() - report the tx queuing-delay histogram for a category
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @report_bin_values: output array of QCA_TX_DELAY_HIST_REPORT_BINS bins
 * @category: access category of interest
 *
 * Collapses the fine-grained internal bins into exponentially wider
 * report bins: report bin i sums internal bins [2^(i-1), 2^i) (bin 0 is
 * just internal bin 0); the final report bin holds the overflow count.
 */
void
ol_tx_delay_hist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		 uint16_t *report_bin_values, int category)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	int index, i, j;
	struct ol_tx_delay_data *data;

	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	/* mutex against the writer switching the ping-pong index */
	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
	/* read the completed (non-in-progress) copy */
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
		uint16_t internal_bin_sum = 0;

		/* fold internal bins [j, 2^i) into report bin i */
		while (j < (1 << i))
			internal_bin_sum += data->hist_bins_queue[j++];

		report_bin_values[i] = internal_bin_sum;
	}
	report_bin_values[i] = data->hist_bins_queue[j];        /* overflow */

	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
1646*5113495bSYour Name 
1647*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY_PER_TID
/**
 * ol_tx_delay_tid_from_l3_hdr() - derive a delay-tracking TID from the
 *	frame's L3 header when the nbuf carries no valid TID
 * @pdev: data path pdev handle
 * @msdu: the frame being classified
 * @tx_desc: tx descriptor for @msdu (used to detect mgmt frames)
 *
 * Mgmt and multicast frames map to dedicated extension categories;
 * unicast data frames are classified by IPv4 TOS precedence or the
 * IPv6 traffic class (top 3 bits).
 *
 * Return: TID/category value, or QDF_NBUF_TX_EXT_TID_INVALID if the
 * frame is not IPv4/IPv6 or has no destination address.
 */
static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
			    qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
	uint16_t ethertype;
	uint8_t *dest_addr, *l3_hdr;
	int is_mgmt, is_mcast;
	int l2_hdr_size;

	dest_addr = ol_tx_dest_addr_find(pdev, msdu);
	if (!dest_addr)
		return QDF_NBUF_TX_EXT_TID_INVALID;

	is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
	is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
	if (is_mgmt) {
		/* mgmt frames get their own category space */
		return (is_mcast) ?
		       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
		       HTT_TX_EXT_TID_MGMT;
	}
	if (is_mcast)
		return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		struct ethernet_hdr_t *enet_hdr;

		enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
		l2_hdr_size = sizeof(struct ethernet_hdr_t);
		ethertype =
			(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
		if (!IS_ETHERTYPE(ethertype)) {
			/* 802.3 length field: real type is in LLC/SNAP */
			struct llc_snap_hdr_t *llc_hdr;

			llc_hdr = (struct llc_snap_hdr_t *)
				  (qdf_nbuf_data(msdu) + l2_hdr_size);
			l2_hdr_size += sizeof(struct llc_snap_hdr_t);
			ethertype =
				(llc_hdr->ethertype[0] << 8) | llc_hdr->
				ethertype[1];
		}
	} else {
		/* native 802.11 framing: LLC/SNAP follows the 802.11 header */
		struct llc_snap_hdr_t *llc_hdr;

		l2_hdr_size = sizeof(struct ieee80211_frame);
		llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
						    + l2_hdr_size);
		l2_hdr_size += sizeof(struct llc_snap_hdr_t);
		ethertype =
			(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	}
	l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
	if (ETHERTYPE_IPV4 == ethertype) {
		/* TID = IPv4 TOS precedence (top 3 bits) */
		return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
	} else if (ETHERTYPE_IPV6 == ethertype) {
		/* TID = IPv6 traffic-class precedence (top 3 bits) */
		return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
		       0x7;
	} else {
		return QDF_NBUF_TX_EXT_TID_INVALID;
	}
}
1708*5113495bSYour Name 
/**
 * ol_tx_delay_category() - delay-tracking category (TID) for an msdu
 * @pdev: data path pdev handle
 * @msdu_id: tx descriptor id of the msdu
 *
 * Uses the TID already recorded in the nbuf when valid; otherwise
 * derives one from the frame's L3 header.
 *
 * NOTE(review): assumes @msdu_id maps to a valid descriptor with a
 * non-NULL netbuf — no NULL check here; confirm callers guarantee this.
 *
 * Return: TID/category on success, -EINVAL if it cannot be determined
 */
static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
	uint8_t tid;
	qdf_nbuf_t msdu = tx_desc->netbuf;

	tid = qdf_nbuf_get_tid(msdu);
	if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
		/* fall back to classifying by the L3 header */
		tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
		if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
			/*
			 * TID could not be determined
			 * (this is not an IP frame?)
			 */
			return -EINVAL;
		}
	}
	return tid;
}
1728*5113495bSYour Name #else
/* Per-TID delay tracking disabled: every msdu maps to category 0. */
static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
	return 0;
}
1733*5113495bSYour Name #endif
1734*5113495bSYour Name 
1735*5113495bSYour Name static inline int
ol_tx_delay_hist_bin(struct ol_txrx_pdev_t * pdev,uint32_t delay_ticks)1736*5113495bSYour Name ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
1737*5113495bSYour Name {
1738*5113495bSYour Name 	int bin;
1739*5113495bSYour Name 	/*
1740*5113495bSYour Name 	 * For speed, multiply and shift to approximate a divide. This causes
1741*5113495bSYour Name 	 * a small error, but the approximation error should be much less
1742*5113495bSYour Name 	 * than the other uncertainties in the tx delay computation.
1743*5113495bSYour Name 	 */
1744*5113495bSYour Name 	bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
1745*5113495bSYour Name 	      pdev->tx_delay.hist_internal_bin_width_shift;
1746*5113495bSYour Name 	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
1747*5113495bSYour Name 		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;
1748*5113495bSYour Name 
1749*5113495bSYour Name 	return bin;
1750*5113495bSYour Name }
1751*5113495bSYour Name 
1752*5113495bSYour Name static void
ol_tx_delay_compute(struct ol_txrx_pdev_t * pdev,enum htt_tx_status status,uint16_t * desc_ids,int num_msdus)1753*5113495bSYour Name ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
1754*5113495bSYour Name 		    enum htt_tx_status status,
1755*5113495bSYour Name 		    uint16_t *desc_ids, int num_msdus)
1756*5113495bSYour Name {
1757*5113495bSYour Name 	int i, index, cat;
1758*5113495bSYour Name 	uint32_t now_ticks = qdf_system_ticks();
1759*5113495bSYour Name 	uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
1760*5113495bSYour Name 	uint32_t avg_time_ticks;
1761*5113495bSYour Name 	struct ol_tx_delay_data *data;
1762*5113495bSYour Name 
1763*5113495bSYour Name 	qdf_assert(num_msdus > 0);
1764*5113495bSYour Name 
1765*5113495bSYour Name 	/*
1766*5113495bSYour Name 	 * keep static counters for total packet and lost packets
1767*5113495bSYour Name 	 * reset them in ol_tx_delay(), function used to fetch the stats
1768*5113495bSYour Name 	 */
1769*5113495bSYour Name 
1770*5113495bSYour Name 	cat = ol_tx_delay_category(pdev, desc_ids[0]);
1771*5113495bSYour Name 	if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
1772*5113495bSYour Name 		return;
1773*5113495bSYour Name 
1774*5113495bSYour Name 	pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
1775*5113495bSYour Name 	if (status != htt_tx_status_ok) {
1776*5113495bSYour Name 		for (i = 0; i < num_msdus; i++) {
1777*5113495bSYour Name 			cat = ol_tx_delay_category(pdev, desc_ids[i]);
1778*5113495bSYour Name 			if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
1779*5113495bSYour Name 				return;
1780*5113495bSYour Name 			pdev->packet_loss_count[cat]++;
1781*5113495bSYour Name 		}
1782*5113495bSYour Name 		return;
1783*5113495bSYour Name 	}
1784*5113495bSYour Name 
1785*5113495bSYour Name 	/* since we may switch the ping-pong index, provide mutex w. readers */
1786*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
1787*5113495bSYour Name 	index = pdev->tx_delay.cats[cat].in_progress_idx;
1788*5113495bSYour Name 
1789*5113495bSYour Name 	data = &pdev->tx_delay.cats[cat].copies[index];
1790*5113495bSYour Name 
1791*5113495bSYour Name 	if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
1792*5113495bSYour Name 		tx_delay_transmit_ticks =
1793*5113495bSYour Name 			now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
1794*5113495bSYour Name 		/*
1795*5113495bSYour Name 		 * We'd like to account for the number of MSDUs that were
1796*5113495bSYour Name 		 * transmitted together, but we don't know this.  All we know
1797*5113495bSYour Name 		 * is the number of MSDUs that were acked together.
1798*5113495bSYour Name 		 * Since the frame error rate is small, this is nearly the same
1799*5113495bSYour Name 		 * as the number of frames transmitted together.
1800*5113495bSYour Name 		 */
1801*5113495bSYour Name 		data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
1802*5113495bSYour Name 		data->avgs.transmit_num += num_msdus;
1803*5113495bSYour Name 	}
1804*5113495bSYour Name 	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;
1805*5113495bSYour Name 
1806*5113495bSYour Name 	for (i = 0; i < num_msdus; i++) {
1807*5113495bSYour Name 		int bin;
1808*5113495bSYour Name 		uint16_t id = desc_ids[i];
1809*5113495bSYour Name 		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);
1810*5113495bSYour Name 
1811*5113495bSYour Name 		tx_delay_queue_ticks =
1812*5113495bSYour Name 			now_ticks - tx_desc->entry_timestamp_ticks;
1813*5113495bSYour Name 
1814*5113495bSYour Name 		data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
1815*5113495bSYour Name 		data->avgs.queue_num++;
1816*5113495bSYour Name 		bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
1817*5113495bSYour Name 		data->hist_bins_queue[bin]++;
1818*5113495bSYour Name 	}
1819*5113495bSYour Name 
1820*5113495bSYour Name 	/* check if it's time to start a new average */
1821*5113495bSYour Name 	avg_time_ticks =
1822*5113495bSYour Name 		now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
1823*5113495bSYour Name 	if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
1824*5113495bSYour Name 		pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
1825*5113495bSYour Name 		index = 1 - index;
1826*5113495bSYour Name 		pdev->tx_delay.cats[cat].in_progress_idx = index;
1827*5113495bSYour Name 		qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
1828*5113495bSYour Name 			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
1829*5113495bSYour Name 	}
1830*5113495bSYour Name 
1831*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
1832*5113495bSYour Name }
1833*5113495bSYour Name 
1834*5113495bSYour Name #endif /* QCA_COMPUTE_TX_DELAY */
1835*5113495bSYour Name 
1836*5113495bSYour Name #ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
/**
 * ol_register_timestamp_callback() - install the tx timestamp callback
 * on the default pdev.
 * @ol_tx_timestamp_cb: callback to invoke for tx packet timestamping
 *
 * Silently does nothing if the SOC context is unavailable; logs an
 * error and does nothing if the pdev cannot be looked up.
 */
void ol_register_timestamp_callback(tp_ol_timestamp_cb ol_tx_timestamp_cb)
{
	ol_txrx_pdev_handle pdev;
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (pdev)
		pdev->ol_tx_timestamp_cb = ol_tx_timestamp_cb;
	else
		ol_txrx_err("pdev is NULL");
}
1852*5113495bSYour Name 
ol_deregister_timestamp_callback(void)1853*5113495bSYour Name void ol_deregister_timestamp_callback(void)
1854*5113495bSYour Name {
1855*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1856*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
1857*5113495bSYour Name 
1858*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
1859*5113495bSYour Name 	if (!pdev) {
1860*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
1861*5113495bSYour Name 		return;
1862*5113495bSYour Name 	}
1863*5113495bSYour Name 	pdev->ol_tx_timestamp_cb = NULL;
1864*5113495bSYour Name }
1865*5113495bSYour Name #endif
1866