1 /*
2  * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
21 #include <qdf_lock.h>           /* qdf_os_spinlock */
22 #include <qdf_time.h>           /* qdf_system_ticks, etc. */
23 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
24 #include <qdf_net_types.h>      /* QDF_NBUF_TX_EXT_TID_INVALID */
25 
26 #include "queue.h"          /* TAILQ */
27 #ifdef QCA_COMPUTE_TX_DELAY
28 #include <enet.h>               /* ethernet_hdr_t, etc. */
29 #include <ipv6_defs.h>          /* ipv6_traffic_class */
30 #endif
31 
32 #include <ol_txrx_api.h>        /* ol_txrx_vdev_handle, etc. */
33 #include <ol_htt_tx_api.h>      /* htt_tx_compl_desc_id */
34 #include <ol_txrx_htt_api.h>    /* htt_tx_status */
35 
36 #include <ol_ctrl_txrx_api.h>
37 #include <cdp_txrx_tx_delay.h>
38 #include <ol_txrx_types.h>      /* ol_txrx_vdev_t, etc */
39 #include <ol_tx_desc.h>         /* ol_tx_desc_find, ol_tx_desc_frame_free */
40 #ifdef QCA_COMPUTE_TX_DELAY
41 #include <ol_tx_classify.h>     /* ol_tx_dest_addr_find */
42 #endif
43 #include <ol_txrx_internal.h>   /* OL_TX_DESC_NO_REFS, etc. */
44 #include <ol_osif_txrx_api.h>
45 #include <ol_tx.h>              /* ol_tx_reinject */
46 #include <ol_tx_send.h>
47 
48 #include <ol_cfg.h>             /* ol_cfg_is_high_latency */
49 #include <ol_tx_sched.h>
50 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
51 #include <ol_txrx_encap.h>      /* OL_TX_RESTORE_HDR, etc */
52 #endif
53 #include <ol_tx_queue.h>
54 #include <ol_txrx.h>
55 #include <pktlog_ac_fmt.h>
56 #include <cdp_txrx_handle.h>
57 #include <wlan_pkt_capture_ucfg_api.h>
58 #include <wlan_dp_txrx.h>
59 #ifdef TX_CREDIT_RECLAIM_SUPPORT
60 
61 #define OL_TX_CREDIT_RECLAIM(pdev)					\
62 	do {								\
63 		if (qdf_atomic_read(&pdev->target_tx_credit)  <		\
64 		    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) {		\
65 			ol_osif_ath_tasklet(pdev->osdev);		\
66 		}							\
67 	} while (0)
68 
69 #else
70 
71 #define OL_TX_CREDIT_RECLAIM(pdev)
72 
73 #endif /* TX_CREDIT_RECLAIM_SUPPORT */
74 
75 #if defined(CONFIG_HL_SUPPORT) || defined(TX_CREDIT_RECLAIM_SUPPORT)
76 
77 /*
78  * HL needs to keep track of the amount of credit available to download
79  * tx frames to the target - the download scheduler decides when to
80  * download frames, and which frames to download, based on the credit
81  * availability.
82  * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
83  * of the target_tx_credit, to determine when to poll for tx completion
84  * messages.
85  */
86 static inline void
87 ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
88 {
89 	qdf_atomic_add(-1 * delta, &pdev->target_tx_credit);
90 }
91 
92 static inline void
93 ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
94 {
95 	qdf_atomic_add(delta, &pdev->target_tx_credit);
96 }
97 #else
98 
99 static inline void
100 ol_tx_target_credit_decr_int(struct ol_txrx_pdev_t *pdev, int delta)
101 {
102 }
103 
104 static inline void
105 ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
106 {
107 }
108 #endif
109 
110 #ifdef DESC_TIMESTAMP_DEBUG_INFO
111 static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc)
112 {
113 	tx_desc->desc_debug_info.last_comp_ts = qdf_get_log_timestamp();
114 }
115 #else
116 static inline void ol_tx_desc_update_comp_ts(struct ol_tx_desc_t *tx_desc)
117 {
118 }
119 #endif
120 
121 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_HL_NETDEV_FLOW_CONTROL)
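/**
 * ol_tx_flow_ct_unpause_os_q() - unpause the OS netif queues of vdevs whose
 * pending tx descriptor count has dropped below the relevant thresholds
 * @pdev: physical device instance
 *
 * If any non-priority queue is unpaused, the HL download scheduler is
 * kicked to send the queued frames.
 *
 * Return: None
 */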
122 void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
123 {
124 	struct ol_txrx_vdev_t *vdev;
125 	bool trigger_unpause = false;
126 
127 	qdf_spin_lock_bh(&pdev->tx_mutex);
128 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
129 		if (vdev->tx_desc_limit == 0)
130 			continue;
131 
132 		/* un-pause high priority queue */
133 		if (vdev->prio_q_paused &&
134 		    (qdf_atomic_read(&vdev->tx_desc_count)
135 		     < vdev->tx_desc_limit)) {
136 			pdev->pause_cb(vdev->vdev_id,
137 				       WLAN_NETIF_PRIORITY_QUEUE_ON,
138 				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
139 			vdev->prio_q_paused = 0;
140 		}
141 		/* un-pause non priority queues */
142 		if (qdf_atomic_read(&vdev->os_q_paused) &&
143 		    (qdf_atomic_read(&vdev->tx_desc_count)
144 		    <= vdev->queue_restart_th)) {
145 			pdev->pause_cb(vdev->vdev_id,
146 				       WLAN_WAKE_NON_PRIORITY_QUEUE,
147 				       WLAN_DATA_FLOW_CONTROL);
148 			qdf_atomic_set(&vdev->os_q_paused, 0);
149 			trigger_unpause = true;
150 		}
151 	}
152 	qdf_spin_unlock_bh(&pdev->tx_mutex);
153 	if (trigger_unpause)
154 		ol_tx_hl_pdev_queue_send_all(pdev);
155 }
156 #endif
157 
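/**
 * ol_tx_send_base() - common bookkeeping before downloading a tx frame
 * @pdev: physical device instance
 * @tx_desc: tx descriptor for the frame
 * @msdu: frame being downloaded
 *
 * Deduct the target tx credit consumed by the MSDU and double-increment
 * the descriptor ref count to cover both the download-done and the
 * tx-completion references.
 *
 * Return: number of credits consumed by the MSDU
 */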
158 static inline uint16_t
159 ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
160 		struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu)
161 {
162 	int msdu_credit_consumed;
163 
164 	TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", qdf_nbuf_len(msdu));
165 	TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
166 			      qdf_atomic_read(&pdev->target_tx_credit),
167 			      qdf_atomic_read(&pdev->target_tx_credit) - 1,
168 			      qdf_nbuf_len(msdu));
169 
170 	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
171 	ol_tx_target_credit_decr_int(pdev, msdu_credit_consumed);
172 	OL_TX_CREDIT_RECLAIM(pdev);
173 
174 	/*
175 	 * When the tx frame is downloaded to the target, there are two
176 	 * outstanding references:
177 	 * 1.  The host download SW (HTT, HTC, HIF)
178 	 *     This reference is cleared by the ol_tx_send_done callback
179 	 *     functions.
180 	 * 2.  The target FW
181 	 *     This reference is cleared by the ol_tx_completion_handler
182 	 *     function.
183 	 * It is extremely probable that the download completion is processed
184 	 * before the tx completion message.  However, under exceptional
185 	 * conditions the tx completion may be processed first.  Thus, rather
186 	 * than assuming that reference (1) is done before reference (2),
187 	 * explicit reference tracking is needed.
188 	 * Double-increment the ref count to account for both references
189 	 * described above.
190 	 */
191 
192 	OL_TX_DESC_REF_INIT(tx_desc);
193 	OL_TX_DESC_REF_INC(tx_desc);
194 	OL_TX_DESC_REF_INC(tx_desc);
195 
196 	return msdu_credit_consumed;
197 }
198 
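/**
 * ol_tx_send() - download a standard tx frame to the target
 * @pdev: physical device instance
 * @tx_desc: tx descriptor for the frame
 * @msdu: frame to download
 * @vdev_id: virtual device ID, used for DP tracing
 *
 * If the HTT download fails, the consumed credit is restored and the
 * frame is freed as an error.
 *
 * Return: None
 */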
199 void
200 ol_tx_send(struct ol_txrx_pdev_t *pdev,
201 	   struct ol_tx_desc_t *tx_desc, qdf_nbuf_t msdu, uint8_t vdev_id)
202 {
203 	int msdu_credit_consumed;
204 	uint16_t id;
205 	int failed;
206 
207 	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
208 	id = ol_tx_desc_id(pdev, tx_desc);
209 	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
210 	DPTRACE(qdf_dp_trace_ptr(msdu, QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
211 				QDF_TRACE_DEFAULT_PDEV_ID,
212 				qdf_nbuf_data_addr(msdu),
213 				sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
214 				vdev_id, 0,
215 				tx_desc->vdev->qdf_opmode
216 				));
217 	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
218 	if (qdf_unlikely(failed)) {
219 		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
220 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
221 	}
222 }
223 
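/**
 * ol_tx_send_batch() - download a linked list of tx frames to the target
 * @pdev: physical device instance
 * @head_msdu: head of the MSDU list
 * @num_msdus: number of MSDUs in the list
 *
 * Frames rejected by htt_tx_send_batch() have their credit restored and
 * are freed as errors.
 *
 * Return: None
 */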
224 void
225 ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
226 		 qdf_nbuf_t head_msdu, int num_msdus)
227 {
228 	qdf_nbuf_t rejected;
229 
230 	OL_TX_CREDIT_RECLAIM(pdev);
231 
232 	rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
233 	while (qdf_unlikely(rejected)) {
234 		struct ol_tx_desc_t *tx_desc;
235 		uint16_t *msdu_id_storage;
236 		qdf_nbuf_t next;
237 
238 		next = qdf_nbuf_next(rejected);
239 		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
240 		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);
241 
242 		ol_tx_target_credit_incr(pdev, rejected);
243 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
244 
245 		rejected = next;
246 	}
247 }
248 
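/**
 * ol_tx_send_nonstd() - download a non-standard tx frame to the target
 * @pdev: physical device instance
 * @tx_desc: tx descriptor for the frame
 * @msdu: frame to download
 * @pkt_type: HTT packet type of the frame
 *
 * Return: None
 */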
249 void
250 ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
251 		  struct ol_tx_desc_t *tx_desc,
252 		  qdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
253 {
254 	int msdu_credit_consumed;
255 	uint16_t id;
256 	int failed;
257 
258 	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
259 	id = ol_tx_desc_id(pdev, tx_desc);
260 	QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu, QDF_NBUF_TX_PKT_TXRX);
261 	failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
262 	if (failed) {
263 		ol_txrx_err(
264 			   "Error: freeing tx frame after htt_tx failed");
265 		ol_tx_target_credit_incr_int(pdev, msdu_credit_consumed);
266 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
267 	}
268 }
269 
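/**
 * ol_tx_download_done_base() - handle a tx download completion
 * @pdev: physical device instance
 * @status: download status reported by the target
 * @msdu: frame whose download completed
 * @msdu_id: tx descriptor ID of the frame
 *
 * Invoke the management download callback if one is registered, restore
 * credit and free the frame on a download failure, or free the frame if
 * the tx completion has already been processed.
 *
 * Return: true if the frame was freed here, false otherwise
 */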
270 static inline bool
271 ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
272 			 A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
273 {
274 	struct ol_tx_desc_t *tx_desc;
275 	bool is_frame_freed = false;
276 
277 	tx_desc = ol_tx_desc_find(pdev, msdu_id);
278 	qdf_assert(tx_desc);
279 
280 	/*
281 	 * If the download that completed was for
282 	 * a management frame, call the download
283 	 * callback if one is registered.
284 	 */
285 	if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
286 		ol_txrx_mgmt_tx_cb download_cb =
287 			pdev->tx_mgmt_cb.download_cb;
288 		if (download_cb) {
289 			download_cb(pdev->tx_mgmt_cb.ctxt,
290 				    tx_desc->netbuf, status != A_OK);
291 		}
292 	}
293 
294 	if (status != A_OK) {
295 		ol_tx_target_credit_incr(pdev, msdu);
296 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
297 					     1 /* download err */);
298 		is_frame_freed = true;
299 	} else {
300 		if (OL_TX_DESC_NO_REFS(tx_desc)) {
301 			/*
302 			 * The decremented value was zero - free the frame.
303 			 * Use the tx status recorded previously during
304 			 * tx completion handling.
305 			 */
306 			ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
307 						     tx_desc->status !=
308 						     htt_tx_status_ok);
309 			is_frame_freed = true;
310 		}
311 	}
312 	return is_frame_freed;
313 }
314 
315 void
316 ol_tx_download_done_ll(void *pdev,
317 		       A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
318 {
319 	ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
320 				 msdu_id);
321 }
322 
323 void
324 ol_tx_download_done_hl_retain(void *txrx_pdev,
325 			      A_STATUS status,
326 			      qdf_nbuf_t msdu, uint16_t msdu_id)
327 {
328 	struct ol_txrx_pdev_t *pdev = txrx_pdev;
329 
330 	ol_tx_download_done_base(pdev, status, msdu, msdu_id);
331 }
332 
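/**
 * ol_tx_download_done_hl_free() - HL download-done handler that also frees
 * the downloaded frame and replenishes the HL tx descriptor resource count
 * @txrx_pdev: opaque physical device instance
 * @status: download status reported by the target
 * @msdu: frame whose download completed
 * @msdu_id: tx descriptor ID of the frame
 *
 * Return: None
 */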
333 void
334 ol_tx_download_done_hl_free(void *txrx_pdev,
335 			    A_STATUS status, qdf_nbuf_t msdu, uint16_t msdu_id)
336 {
337 	struct ol_txrx_pdev_t *pdev = txrx_pdev;
338 	struct ol_tx_desc_t *tx_desc;
339 	bool is_frame_freed;
340 	uint8_t dp_status;
341 
342 	tx_desc = ol_tx_desc_find(pdev, msdu_id);
343 	qdf_assert(tx_desc);
344 	dp_status = qdf_dp_get_status_from_a_status(status);
345 	DPTRACE(qdf_dp_trace_ptr(msdu,
346 				 QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
347 				 QDF_TRACE_DEFAULT_PDEV_ID,
348 				 qdf_nbuf_data_addr(msdu),
349 				 sizeof(qdf_nbuf_data(msdu)), tx_desc->id,
350 				 dp_status, 0,
351 				 tx_desc->vdev->qdf_opmode
352 				 ));
353 
354 	is_frame_freed = ol_tx_download_done_base(pdev, status, msdu, msdu_id);
355 
356 	/*
357 	 * If the frame was freed in ol_tx_download_done_base(), then return.
358 	 */
359 	if (is_frame_freed) {
360 		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
361 		return;
362 	}
363 
364 	if ((tx_desc->pkt_type != OL_TX_FRM_NO_FREE) &&
365 	    (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
366 		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
367 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
368 	}
369 }
370 
371 void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
372 {
373 	qdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
374 }
375 
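/**
 * ol_tx_target_credit_update() - adjust the target tx credit count
 * @pdev: physical device instance
 * @credit_delta: signed credit adjustment
 *
 * Return: None
 */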
376 void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
377 {
378 	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
379 			      qdf_atomic_read(&pdev->target_tx_credit),
380 			      credit_delta,
381 			      qdf_atomic_read(&pdev->target_tx_credit) +
382 			      credit_delta);
383 	qdf_atomic_add(credit_delta, &pdev->target_tx_credit);
384 }
385 
386 #ifdef QCA_COMPUTE_TX_DELAY
387 
388 static void
389 ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
390 		    enum htt_tx_status status,
391 		    uint16_t *desc_ids, int num_msdus);
392 
393 #else
394 static inline void
395 ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
396 		    enum htt_tx_status status,
397 		    uint16_t *desc_ids, int num_msdus)
398 {
399 }
400 #endif /* QCA_COMPUTE_TX_DELAY */
401 
402 #if defined(CONFIG_HL_SUPPORT)
403 int ol_tx_deduct_one_credit(struct ol_txrx_pdev_t *pdev)
404 {
405 	/* TODO: Check if enough credits */
406 
407 	if (!pdev->cfg.default_tx_comp_req) {
408 		ol_tx_target_credit_update(pdev, -1);
409 		ol_tx_deduct_one_any_group_credit(pdev);
410 
411 		DPTRACE(qdf_dp_trace_credit_record(QDF_TX_HTT_MSG,
412 			QDF_CREDIT_DEC, 1,
413 			qdf_atomic_read(&pdev->target_tx_credit),
414 			qdf_atomic_read(&pdev->txq_grps[0].credit),
415 			qdf_atomic_read(&pdev->txq_grps[1].credit)));
416 	}
417 
418 	return 0;
419 }
420 #endif /* CONFIG_HL_SUPPORT */
421 
422 #ifndef OL_TX_RESTORE_HDR
423 #define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
424 #endif
425 /*
426  * The following macros could have been inline functions too.
427  * The only rationale for choosing macros is to force the compiler to inline
428  * the implementation, which cannot be controlled for actual "inline" functions,
429  * since "inline" is only a hint to the compiler.
430  * In the performance path, we choose to force the inlining, in preference to
431  * type-checking offered by actual inline functions.
432  */
433 #define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
434 	TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
435 #ifndef ATH_11AC_TXCOMPACT
436 #define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
437 				   _lcl_freelist, _tx_desc_last)	\
438 	do {								\
439 		qdf_atomic_init(&(_tx_desc)->ref_cnt);			\
440 		/* restore original hdr offset */			\
441 		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
442 		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
443 		qdf_nbuf_free((_netbuf));				\
444 		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
445 			(_lcl_freelist);				\
446 		if (qdf_unlikely(!lcl_freelist)) {			\
447 			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
448 				(_tx_desc);				\
449 		}							\
450 		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
451 	} while (0)
452 #else    /*!ATH_11AC_TXCOMPACT */
453 #define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
454 				   _lcl_freelist, _tx_desc_last)	\
455 	do {								\
456 		/* restore original hdr offset */			\
457 		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf));		\
458 		qdf_nbuf_unmap((_pdev)->osdev, (_netbuf), QDF_DMA_TO_DEVICE); \
459 		qdf_nbuf_free((_netbuf));				\
460 		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next =	\
461 			(_lcl_freelist);				\
462 		if (qdf_unlikely(!lcl_freelist)) {			\
463 			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
464 				(_tx_desc);				\
465 		}							\
466 		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
467 	} while (0)
468 
469 #endif /*!ATH_11AC_TXCOMPACT */
470 
471 #ifdef QCA_TX_SINGLE_COMPLETIONS
472 #ifdef QCA_TX_STD_PATH_ONLY
473 #define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
474 			    _netbuf, _lcl_freelist,			\
475 			    _tx_desc_last, _status, is_tx_desc_freed)	\
476 	{								\
477 		is_tx_desc_freed = 0;					\
478 		ol_tx_msdu_complete_single((_pdev), (_tx_desc),		\
479 					   (_netbuf), (_lcl_freelist),	\
480 					   _tx_desc_last)		\
481 	}
482 #else                           /* !QCA_TX_STD_PATH_ONLY */
483 #define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
484 			    _netbuf, _lcl_freelist,			\
485 			    _tx_desc_last, _status, is_tx_desc_freed)	\
486 	do {								\
487 		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
488 			is_tx_desc_freed = 0;				\
489 			ol_tx_msdu_complete_single((_pdev), (_tx_desc),\
490 						   (_netbuf), (_lcl_freelist), \
491 						   (_tx_desc_last));	\
492 		} else {						\
493 			is_tx_desc_freed = 1;				\
494 			ol_tx_desc_frame_free_nonstd(			\
495 				(_pdev), (_tx_desc),			\
496 				(_status) != htt_tx_status_ok);		\
497 		}							\
498 	} while (0)
499 #endif /* !QCA_TX_STD_PATH_ONLY */
500 #else                           /* !QCA_TX_SINGLE_COMPLETIONS */
501 #ifdef QCA_TX_STD_PATH_ONLY
502 #define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
503 			    _netbuf, _lcl_freelist,			\
504 			    _tx_desc_last, _status, is_tx_desc_freed)	\
505 	{								\
506 		is_tx_desc_freed = 0;					\
507 		ol_tx_msdu_complete_batch((_pdev), (_tx_desc),		\
508 					(_tx_descs), (_status))		\
509 	}
510 #else                           /* !QCA_TX_STD_PATH_ONLY */
511 #define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
512 			    _netbuf, _lcl_freelist,			\
513 			    _tx_desc_last, _status, is_tx_desc_freed)	\
514 	do {								\
515 		if (qdf_likely((_tx_desc)->pkt_type == OL_TX_FRM_STD)) { \
516 			is_tx_desc_freed = 0;				\
517 			ol_tx_msdu_complete_batch((_pdev), (_tx_desc),	\
518 						  (_tx_descs), (_status)); \
519 		} else {						\
520 			is_tx_desc_freed = 1;				\
521 			ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
522 						     (_status) !=	\
523 						     htt_tx_status_ok); \
524 		}							\
525 	} while (0)
526 #endif /* !QCA_TX_STD_PATH_ONLY */
527 #endif /* QCA_TX_SINGLE_COMPLETIONS */
528 
529 #if !defined(CONFIG_HL_SUPPORT)
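/**
 * ol_tx_discard_target_frms() - free tx descriptors still held by the target
 * @pdev: physical device instance
 *
 * Walk the tx descriptor pool and free any descriptor that still has
 * references, i.e. frames handed to the target for which no tx completion
 * was ever received.
 *
 * Return: None
 */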
530 void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
531 {
532 	int i = 0;
533 	struct ol_tx_desc_t *tx_desc;
534 	int num_discarded = 0;
535 
536 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
537 		tx_desc = ol_tx_desc_find(pdev, i);
538 		/*
539 		 * Confirm that each tx descriptor is "empty", i.e. it has
540 		 * no tx frame attached.
541 		 * In particular, check that there are no frames that have
542 		 * been given to the target to transmit, for which the
543 		 * target has never provided a response.
544 		 */
545 		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
546 			ol_txrx_dbg(
547 				   "Warning: freeing tx desc %d", tx_desc->id);
548 			ol_tx_desc_frame_free_nonstd(pdev,
549 						     tx_desc, 1);
550 			num_discarded++;
551 		}
552 	}
553 
554 	if (num_discarded)
555 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
556 			"Warning: freed %d tx descs for which no tx completion rcvd from the target",
557 			num_discarded);
558 }
559 #endif
560 
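/**
 * ol_tx_credit_completion_handler() - process a target tx credit update
 * @pdev: physical device instance
 * @credits: number of credits returned by the target
 *
 * Update the target tx credit, run the HL download scheduler if applicable,
 * and unpause the OS netif queues.
 *
 * Return: None
 */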
561 void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
562 {
563 	ol_tx_target_credit_update(pdev, credits);
564 
565 	if (pdev->cfg.is_high_latency)
566 		ol_tx_sched(pdev);
567 
568 	/* UNPAUSE OS Q */
569 	ol_tx_flow_ct_unpause_os_q(pdev);
570 }
571 
572 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
573 /**
574  * ol_tx_flow_pool_lock() - take flow pool lock
575  * @tx_desc: tx desc
576  *
577  * Return: None
578  */
579 static inline
580 void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc)
581 {
582 	struct ol_tx_flow_pool_t *pool;
583 
584 	pool = tx_desc->pool;
585 	qdf_spin_lock_bh(&pool->flow_pool_lock);
586 }
587 
588 /**
589  * ol_tx_flow_pool_unlock() - release flow pool lock
590  * @tx_desc: tx desc
591  *
592  * Return: None
593  */
594 static inline
595 void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc)
596 {
597 	struct ol_tx_flow_pool_t *pool;
598 
599 	pool = tx_desc->pool;
600 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
601 }
602 #else
603 static inline
604 void ol_tx_flow_pool_lock(struct ol_tx_desc_t *tx_desc)
605 {
606 }
607 
608 static inline
609 void ol_tx_flow_pool_unlock(struct ol_tx_desc_t *tx_desc)
610 {
611 }
612 #endif
613 
614 #ifdef WLAN_FEATURE_PKT_CAPTURE
615 #define RESERVE_BYTES 100
616 /**
617  * ol_tx_pkt_capture_tx_completion_process(): process tx packets
618  * for pkt capture mode
619  * @pdev: device handler
620  * @tx_desc: tx desc
621  * @payload: tx data header
622  * @tid:  tid number
623  * @status: Tx status
624  *
625  * Return: none
626  */
627 static void
628 ol_tx_pkt_capture_tx_completion_process(
629 			ol_txrx_pdev_handle pdev,
630 			struct ol_tx_desc_t *tx_desc,
631 			struct htt_tx_data_hdr_information *payload_hdr,
632 			uint8_t tid, uint8_t status)
633 {
634 	qdf_nbuf_t netbuf;
635 	int nbuf_len;
636 	struct qdf_tso_seg_elem_t *tso_seg = NULL;
637 	struct ol_txrx_peer_t *peer;
638 	uint8_t bssid[QDF_MAC_ADDR_SIZE];
639 	uint8_t pkt_type = 0;
640 
641 	qdf_assert(tx_desc);
642 
643 	ol_tx_flow_pool_lock(tx_desc);
644 	/*
645 	 * If the vdev has gone down while tx completions are still
646 	 * being received, tx_desc->vdev may be NULL.
647 	 * So, check for NULL before dereferencing it.
648 	 */
649 	if (!tx_desc->vdev) {
650 		ol_tx_flow_pool_unlock(tx_desc);
651 		return;
652 	}
653 
654 	ol_tx_flow_pool_unlock(tx_desc);
655 
656 	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
657 		if (!tx_desc->tso_desc)
658 			return;
659 
660 		tso_seg = tx_desc->tso_desc;
661 		nbuf_len = tso_seg->seg.total_len;
662 	} else {
663 		int i, extra_frag_len = 0;
664 
665 		i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(tx_desc->netbuf);
666 		if (i > 0)
667 			extra_frag_len =
668 			QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(tx_desc->netbuf);
669 		nbuf_len = qdf_nbuf_len(tx_desc->netbuf) - extra_frag_len;
670 	}
671 
672 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
673 	peer = TAILQ_FIRST(&tx_desc->vdev->peer_list);
674 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
675 	if (!peer)
676 		return;
677 
678 	qdf_spin_lock_bh(&peer->peer_info_lock);
679 	qdf_mem_copy(bssid, &peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
680 	qdf_spin_unlock_bh(&peer->peer_info_lock);
681 
682 	netbuf = qdf_nbuf_alloc(NULL,
683 				roundup(nbuf_len + RESERVE_BYTES, 4),
684 				RESERVE_BYTES, 4, false);
685 	if (!netbuf)
686 		return;
687 
688 	qdf_nbuf_put_tail(netbuf, nbuf_len);
689 
690 	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
691 		uint8_t frag_cnt, num_frags = 0;
692 		int frag_len = 0;
693 		uint32_t tcp_seq_num;
694 		uint16_t ip_len;
695 
696 		qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
697 
698 		if (tso_seg->seg.num_frags > 0)
699 			num_frags = tso_seg->seg.num_frags - 1;
700 
701 		/* Num of frags in a tso seg cannot be less than 2 */
702 		if (num_frags < 1) {
703 			qdf_print("ERROR: num of frags in tso segment is %d\n",
704 				  (num_frags + 1));
705 			qdf_nbuf_free(netbuf);
706 			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
707 			return;
708 		}
709 
710 		tcp_seq_num = tso_seg->seg.tso_flags.tcp_seq_num;
711 		tcp_seq_num = qdf_cpu_to_be32(tcp_seq_num);
712 
713 		ip_len = tso_seg->seg.tso_flags.ip_len;
714 		ip_len = qdf_cpu_to_be16(ip_len);
715 
716 		for (frag_cnt = 0; frag_cnt <= num_frags; frag_cnt++) {
717 			qdf_mem_copy(qdf_nbuf_data(netbuf) + frag_len,
718 				     tso_seg->seg.tso_frags[frag_cnt].vaddr,
719 				     tso_seg->seg.tso_frags[frag_cnt].length);
720 			frag_len += tso_seg->seg.tso_frags[frag_cnt].length;
721 		}
722 
723 		qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
724 
725 		qdf_mem_copy((qdf_nbuf_data(netbuf) + IPV4_PKT_LEN_OFFSET),
726 			     &ip_len, sizeof(ip_len));
727 		qdf_mem_copy((qdf_nbuf_data(netbuf) + IPV4_TCP_SEQ_NUM_OFFSET),
728 			     &tcp_seq_num, sizeof(tcp_seq_num));
729 	} else {
730 		qdf_mem_copy(qdf_nbuf_data(netbuf),
731 			     qdf_nbuf_data(tx_desc->netbuf),
732 			     nbuf_len);
733 	}
734 
735 	qdf_nbuf_push_head(
736 			netbuf,
737 			sizeof(struct htt_tx_data_hdr_information));
738 	qdf_mem_copy(qdf_nbuf_data(netbuf), payload_hdr,
739 		     sizeof(struct htt_tx_data_hdr_information));
740 
741 	ucfg_pkt_capture_tx_completion_process(
742 				tx_desc->vdev_id,
743 				netbuf, pkt_type,
744 				tid, status,
745 				TXRX_PKTCAPTURE_PKT_FORMAT_8023, bssid,
746 				pdev->htt_pdev, payload_hdr->tx_retry_cnt);
747 }
748 #else
749 static void
750 ol_tx_pkt_capture_tx_completion_process(
751 			ol_txrx_pdev_handle pdev,
752 			struct ol_tx_desc_t *tx_desc,
753 			struct htt_tx_data_hdr_information *payload_hdr,
754 			uint8_t tid, uint8_t status)
755 {
756 }
757 #endif /* WLAN_FEATURE_PKT_CAPTURE */
758 
759 #ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
760 static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
761 		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
762 		int num_msdus)
763 {
764 	u_int32_t has_tx_tsf;
765 	u_int32_t has_retry;
766 
767 	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
768 	struct htt_tx_compl_ind_append_retries *retry_list = NULL;
769 	int offset_dwords;
770 
771 	if (num_msdus <= 0)
772 		return NULL;
773 
774 	has_tx_tsf = HTT_TX_COMPL_IND_APPEND1_GET(*msg_word_header);
775 
776 	/* skip header and MSDUx ID part */
777 	offset_dwords = ((num_msdus + 1) >> 1);
778 	*msg_word_payload += offset_dwords;
779 
780 	if (!has_tx_tsf)
781 		return NULL;
782 
783 	has_retry = HTT_TX_COMPL_IND_APPEND_GET(*msg_word_header);
784 	if (has_retry) {
785 		int retry_index = 0;
786 		int width_for_each_retry =
787 			(sizeof(struct htt_tx_compl_ind_append_retries) +
788 			3) >> 2;
789 
790 		retry_list = (struct htt_tx_compl_ind_append_retries *)
791 			(*msg_word_payload + offset_dwords);
792 		while (retry_list) {
793 			if (retry_list[retry_index++].flag == 0)
794 				break;
795 		}
796 		offset_dwords = retry_index * width_for_each_retry;
797 	}
798 
799 	*msg_word_payload +=  offset_dwords;
800 	txtstamp_list = (struct htt_tx_compl_ind_append_tx_tstamp *)
801 		(*msg_word_payload);
802 	return txtstamp_list;
803 }
804 
805 static inline
806 struct htt_tx_compl_ind_append_tx_tsf64 *ol_tx_get_txtstamp64s(
807 		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
808 		int num_msdus)
809 {
810 	u_int32_t has_tx_tstamp64;
811 	u_int32_t has_rssi;
812 	struct htt_tx_compl_ind_append_tx_tsf64 *txtstamp64_list = NULL;
813 
814 	int offset_dwords = 0;
815 
816 	if (num_msdus <= 0)
817 		return NULL;
818 
819 	has_tx_tstamp64 = HTT_TX_COMPL_IND_APPEND3_GET(*msg_word_header);
820 	if (!has_tx_tstamp64)
821 		return NULL;
822 
823 	/* skip MSDUx ACK RSSI part */
824 	has_rssi = HTT_TX_COMPL_IND_APPEND2_GET(*msg_word_header);
825 	if (has_rssi)
826 		offset_dwords = ((num_msdus + 1) >> 1);
827 
828 	*msg_word_payload = *msg_word_payload + offset_dwords;
829 	txtstamp64_list =
830 		(struct htt_tx_compl_ind_append_tx_tsf64 *)
831 		(*msg_word_payload);
832 
833 	return txtstamp64_list;
834 }
835 
836 static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
837 				   enum htt_tx_status status,
838 				   qdf_nbuf_t netbuf, u_int64_t ts)
839 {
840 	if (!netbuf)
841 		return;
842 
843 	if (pdev->ol_tx_timestamp_cb)
844 		pdev->ol_tx_timestamp_cb(status, netbuf, ts);
845 }
846 #else
847 static inline struct htt_tx_compl_ind_append_tx_tstamp *ol_tx_get_txtstamps(
848 		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
849 		int num_msdus)
850 {
851 	return NULL;
852 }
853 
854 static inline
855 struct htt_tx_compl_ind_append_tx_tsf64 *ol_tx_get_txtstamp64s(
856 		u_int32_t *msg_word_header, u_int32_t **msg_word_payload,
857 		int num_msdus)
858 {
859 	return NULL;
860 }
861 
862 static inline void ol_tx_timestamp(ol_txrx_pdev_handle pdev,
863 				   enum htt_tx_status status,
864 				   qdf_nbuf_t netbuf, u_int64_t ts)
865 {
866 }
867 #endif /* WLAN_FEATURE_TSF_PLUS_SOCK_TS */
868 
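/**
 * ol_tx_update_ack_count() - update per-vdev tx ack success/failure counters
 * @tx_desc: tx descriptor of the completed frame
 * @status: tx completion status
 *
 * Return: None
 */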
869 static void ol_tx_update_ack_count(struct ol_tx_desc_t *tx_desc,
870 				   enum htt_tx_status status)
871 {
872 	if (!tx_desc->vdev)
873 		return;
874 
875 	if (status == htt_tx_status_ok)
876 		++tx_desc->vdev->txrx_stats.txack_success;
877 	else
878 		++tx_desc->vdev->txrx_stats.txack_failed;
879 }
880 
881 /**
882  * ol_tx_notify_completion() - Notify tx completion for this desc
883  * @tx_desc: tx desc
884  * @netbuf:  buffer
885  * @status: tx status
886  *
887  * Return: none
888  */
889 static void ol_tx_notify_completion(struct ol_tx_desc_t *tx_desc,
890 				    qdf_nbuf_t netbuf, uint8_t status)
891 {
892 	void *osif_dev;
893 	ol_txrx_completion_fp tx_compl_cbk = NULL;
894 	uint16_t flag = 0;
895 
896 	qdf_assert(tx_desc);
897 
898 	ol_tx_flow_pool_lock(tx_desc);
899 
900 	if (!tx_desc->vdev ||
901 	    !tx_desc->vdev->osif_dev) {
902 		ol_tx_flow_pool_unlock(tx_desc);
903 		return;
904 	}
905 	osif_dev = tx_desc->vdev->osif_dev;
906 	tx_compl_cbk = tx_desc->vdev->tx_comp;
907 	ol_tx_flow_pool_unlock(tx_desc);
908 
909 	if (status == htt_tx_status_ok)
910 		flag = (BIT(QDF_TX_RX_STATUS_OK) |
911 			BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC));
912 	else if (status != htt_tx_status_download_fail)
913 		flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
914 
915 	if (tx_compl_cbk)
916 		tx_compl_cbk(netbuf, osif_dev, flag);
917 }
918 
919 /**
920  * ol_tx_update_connectivity_stats() - update connectivity stats
921  * @tx_desc: tx desc
922  * @netbuf:  buffer
923  * @status: htt status
924  *
925  *
926  * Return: none
927  */
928 static void ol_tx_update_connectivity_stats(struct ol_tx_desc_t *tx_desc,
929 					    qdf_nbuf_t netbuf,
930 					    enum htt_tx_status status)
931 {
932 	void *osif_dev;
933 	uint32_t pkt_type_bitmap;
934 	ol_txrx_stats_rx_fp stats_rx = NULL;
935 	uint8_t pkt_type = 0;
936 
937 	qdf_assert(tx_desc);
938 
939 	ol_tx_flow_pool_lock(tx_desc);
940 	/*
941 	 * If the vdev has gone down while tx completions are still
942 	 * being received, tx_desc->vdev may be NULL.
943 	 * So, check for NULL before dereferencing it.
944 	 */
945 	if (!tx_desc->vdev ||
946 	    !tx_desc->vdev->osif_dev ||
947 	    !tx_desc->vdev->stats_rx) {
948 		ol_tx_flow_pool_unlock(tx_desc);
949 		return;
950 	}
951 	osif_dev = tx_desc->vdev->osif_dev;
952 	stats_rx = tx_desc->vdev->stats_rx;
953 	ol_tx_flow_pool_unlock(tx_desc);
954 
955 	pkt_type_bitmap = wlan_dp_intf_get_pkt_type_bitmap_value(tx_desc->vdev);
956 
957 	if (pkt_type_bitmap) {
958 		if (status != htt_tx_status_download_fail)
959 			stats_rx(netbuf, osif_dev,
960 				 PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
961 		if (status == htt_tx_status_ok)
962 			stats_rx(netbuf, osif_dev,
963 				 PKT_TYPE_TX_ACK_CNT, &pkt_type);
964 	}
965 }
966 
967 #ifdef CONNECTIVITY_PKTLOG
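/**
 * htt_qdf_status_map() - map an HTT tx completion status to a QDF DP status
 * @status: HTT tx completion status
 *
 * Return: corresponding qdf_dp_tx_rx_status value
 */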
968 static inline enum qdf_dp_tx_rx_status
969 htt_qdf_status_map(enum htt_tx_status status)
970 {
971 	switch (status) {
972 	case HTT_TX_COMPL_IND_STAT_OK:
973 		return QDF_TX_RX_STATUS_OK;
974 	case HTT_TX_COMPL_IND_STAT_DISCARD:
975 		return QDF_TX_RX_STATUS_FW_DISCARD;
976 	case HTT_TX_COMPL_IND_STAT_NO_ACK:
977 		return QDF_TX_RX_STATUS_NO_ACK;
978 	case HTT_TX_COMPL_IND_STAT_DROP:
979 		return QDF_TX_RX_STATUS_DROP;
980 	case HTT_HOST_ONLY_STATUS_CODE_START:
981 		return QDF_TX_RX_STATUS_DROP;
982 	default:
983 		return QDF_TX_RX_STATUS_DROP;
984 	}
985 }
986 
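/**
 * ol_tx_send_pktlog() - hand a completed (non-TSO) frame to the registered
 * packet dump callback for connectivity pktlog
 * @soc: datapath soc handle
 * @pdev: physical device instance
 * @tx_desc: tx descriptor of the completed frame
 * @netbuf: completed frame
 * @status: tx completion status
 * @pkt_type: QDF packet type (data or mgmt)
 *
 * Return: None
 */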
987 static inline void
988 ol_tx_send_pktlog(struct ol_txrx_soc_t *soc, ol_txrx_pdev_handle pdev,
989 		  struct ol_tx_desc_t *tx_desc, qdf_nbuf_t netbuf,
990 		  enum htt_tx_status status, enum qdf_pkt_type pkt_type)
991 {
992 	ol_txrx_pktdump_cb packetdump_cb;
993 	enum qdf_dp_tx_rx_status tx_status;
994 
995 	if (tx_desc->pkt_type != OL_TX_FRM_TSO) {
996 		packetdump_cb = pdev->ol_tx_packetdump_cb;
997 		if (packetdump_cb) {
998 			tx_status = htt_qdf_status_map(status);
999 			packetdump_cb((void *)soc, pdev->id,
1000 				      tx_desc->vdev_id,
1001 				      netbuf, tx_status, pkt_type);
1002 		}
1003 	}
1004 }
1005 #else
1006 static inline void
1007 ol_tx_send_pktlog(struct ol_txrx_soc_t *soc, ol_txrx_pdev_handle pdev,
1008 		  struct ol_tx_desc_t *tx_desc, qdf_nbuf_t netbuf,
1009 		  enum htt_tx_status status, enum qdf_pkt_type pkt_type)
1010 {
1011 }
1012 #endif
1013 
1014 /**
1015  * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
1016  * ol_tx_completion_handler().
1017  * Any change in ol_tx_completion_handler() must be mirrored in
1018  * ol_tx_inspect_handler().
1019  */
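/**
 * ol_tx_completion_handler() - process a batched HTT tx completion indication
 * @pdev: physical device instance
 * @num_msdus: number of MSDUs covered by the indication
 * @status: tx completion status for the batch
 * @msg: HTT tx completion indication message
 *
 * Return: None
 */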
1020 void
1021 ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
1022 			 int num_msdus,
1023 			 enum htt_tx_status status, void *msg)
1024 {
1025 	int i;
1026 	uint16_t tx_desc_id;
1027 	struct ol_tx_desc_t *tx_desc;
1028 	uint32_t byte_cnt = 0;
1029 	qdf_nbuf_t netbuf;
1030 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1031 	uint32_t is_tx_desc_freed = 0;
1032 	struct htt_tx_compl_ind_append_tx_tstamp *txtstamp_list = NULL;
1033 	struct htt_tx_compl_ind_append_tx_tsf64 *txtstamp64_list = NULL;
1034 	struct htt_tx_data_hdr_information *pkt_capture_txcomp_hdr_list = NULL;
1035 	u_int32_t *msg_word_header = (u_int32_t *)msg;
1036 	/* msg_word_payload skips the message header */
1037 	u_int32_t *msg_word_payload = msg_word_header + 1;
1038 	u_int32_t *msg_word = (u_int32_t *)msg;
1039 	u_int16_t *desc_ids = (u_int16_t *)(msg_word + 1);
1040 	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
1041 	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
1042 	ol_tx_desc_list tx_descs;
1043 	uint64_t tx_tsf64;
1044 	uint8_t tid;
1045 	uint8_t dp_status;
1046 
1047 	TAILQ_INIT(&tx_descs);
1048 
1049 	tid = HTT_TX_COMPL_IND_TID_GET(*msg_word);
1050 
1051 	ol_tx_delay_compute(pdev, status, desc_ids, num_msdus);
1052 	if (status == htt_tx_status_ok ||
1053 	    status == htt_tx_status_discard ||
1054 	    status == htt_tx_status_no_ack) {
1055 		txtstamp_list = ol_tx_get_txtstamps(
1056 			msg_word_header, &msg_word_payload, num_msdus);
1057 		if (pdev->enable_tx_compl_tsf64)
1058 			txtstamp64_list = ol_tx_get_txtstamp64s(
1059 				msg_word_header, &msg_word_payload, num_msdus);
1060 	}
1061 
1062 	if ((ucfg_pkt_capture_get_mode((void *)soc->psoc) ==
1063 						PACKET_CAPTURE_MODE_DATA_ONLY))
1064 		pkt_capture_txcomp_hdr_list =
1065 				ucfg_pkt_capture_tx_get_txcomplete_data_hdr(
1066 								msg_word,
1067 								num_msdus);
1068 
1069 	for (i = 0; i < num_msdus; i++) {
1070 		tx_desc_id = desc_ids[i];
1071 		if (tx_desc_id >= pdev->tx_desc.pool_size) {
1072 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1073 			"%s: drop due to invalid msdu id = %x\n",
1074 			__func__, tx_desc_id);
1075 			continue;
1076 		}
1077 		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
1078 		qdf_assert(tx_desc);
1079 		ol_tx_desc_update_comp_ts(tx_desc);
1080 		tx_desc->status = status;
1081 		netbuf = tx_desc->netbuf;
1082 
1083 		if (txtstamp64_list) {
1084 			tx_tsf64 =
1085 			(u_int64_t)txtstamp64_list[i].tx_tsf64_high << 32 |
1086 			txtstamp64_list[i].tx_tsf64_low;
1087 
1088 			ol_tx_timestamp(pdev, status,  netbuf, tx_tsf64);
1089 		} else if (txtstamp_list)
1090 			ol_tx_timestamp(pdev, status, netbuf,
1091 					(u_int64_t)txtstamp_list->timestamp[i]
1092 					);
1093 
1094 		if (pkt_capture_txcomp_hdr_list) {
1095 			ol_tx_pkt_capture_tx_completion_process(
1096 						pdev,
1097 						tx_desc,
1098 						&pkt_capture_txcomp_hdr_list[i],
1099 						tid, status);
1100 		}
1101 
1102 		QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
1103 
1104 		/* check tx completion notification */
1105 		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
1106 			ol_tx_notify_completion(tx_desc, netbuf, status);
1107 
1108 		/* track connectivity stats */
1109 		ol_tx_update_connectivity_stats(tx_desc, netbuf,
1110 						status);
1111 		ol_tx_update_ack_count(tx_desc, status);
1112 
1113 		ol_tx_send_pktlog(soc, pdev, tx_desc, netbuf, status,
1114 				  QDF_TX_DATA_PKT);
1115 
1116 		dp_status = ol_tx_comp_hw_to_qdf_status(status);
1117 
1118 		DPTRACE(qdf_dp_trace_ptr(netbuf,
1119 			QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
1120 			QDF_TRACE_DEFAULT_PDEV_ID,
1121 			qdf_nbuf_data_addr(netbuf),
1122 			sizeof(qdf_nbuf_data(netbuf)), tx_desc->id, status,
1123 			dp_status,
1124 			tx_desc->vdev->qdf_opmode));
1125 
1126 		/*
1127 		 * If credits are reported through credit_update_ind then do not
1128 		 * update group credits on tx_complete_ind.
1129 		 */
1130 		if (!pdev->cfg.credit_update_enabled)
1131 			ol_tx_desc_update_group_credit(pdev,
1132 						       tx_desc_id,
1133 						       1, 0, status);
1134 		/* Per SDU update of byte count */
1135 		byte_cnt += qdf_nbuf_len(netbuf);
1136 		if (OL_TX_DESC_NO_REFS(tx_desc)) {
1137 			ol_tx_statistics(
1138 				pdev->ctrl_pdev,
1139 				HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
1140 							  (tx_desc->
1141 							   htt_tx_desc))),
1142 				status != htt_tx_status_ok);
1143 			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
1144 					    lcl_freelist, tx_desc_last, status,
1145 					    is_tx_desc_freed);
1146 
1147 #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
1148 			if (!is_tx_desc_freed) {
1149 				tx_desc->pkt_type = ol_tx_frm_freed;
1150 #ifdef QCA_COMPUTE_TX_DELAY
1151 				tx_desc->entry_timestamp_ticks = 0xffffffff;
1152 #endif
1153 			}
1154 #endif
1155 		}
1156 	}
1157 
1158 	/* One shot protected access to pdev freelist, when setup */
1159 	if (lcl_freelist) {
1160 		qdf_spin_lock(&pdev->tx_mutex);
1161 		tx_desc_last->next = pdev->tx_desc.freelist;
1162 		pdev->tx_desc.freelist = lcl_freelist;
1163 		pdev->tx_desc.num_free += (uint16_t) num_msdus;
1164 		qdf_spin_unlock(&pdev->tx_mutex);
1165 	} else {
1166 		ol_tx_desc_frame_list_free(pdev, &tx_descs,
1167 					   status != htt_tx_status_ok);
1168 	}
1169 
1170 	if (pdev->cfg.is_high_latency) {
1171 		/*
1172 		 * Credit was already explicitly updated by HTT,
1173 		 * but update the number of available tx descriptors,
1174 		 * then invoke the scheduler, since new credit is probably
1175 		 * available now.
1176 		 */
1177 		qdf_atomic_add(num_msdus, &pdev->tx_queue.rsrc_cnt);
1178 		ol_tx_sched(pdev);
1179 	} else {
1180 		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
1181 	}
1182 
1183 	/* UNPAUSE OS Q */
1184 	ol_tx_flow_ct_unpause_os_q(pdev);
1185 	/* Do one shot statistics */
1186 	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
1187 }
1188 
1189 #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
1190 
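/**
 * ol_tx_desc_update_group_credit() - update the credit of the tx queue group
 * that the descriptor's vdev belongs to
 * @pdev: physical device instance
 * @tx_desc_id: tx descriptor ID
 * @credit: credit adjustment
 * @absolute: whether @credit is an absolute value or a delta
 * @status: tx completion status
 *
 * Return: None
 */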
1191 void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
1192 		u_int16_t tx_desc_id, int credit, u_int8_t absolute,
1193 		enum htt_tx_status status)
1194 {
1195 	uint8_t i, is_member;
1196 	uint16_t vdev_id_mask;
1197 	struct ol_tx_desc_t *tx_desc;
1198 
1199 	if (tx_desc_id >= pdev->tx_desc.pool_size) {
1200 		qdf_print("Invalid desc id");
1201 		return;
1202 	}
1203 
1204 	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
1205 	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
1206 		vdev_id_mask =
1207 			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
1208 					pdev->txq_grps[i].membership);
1209 		is_member = OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,
1210 				tx_desc->vdev_id);
1211 		if (is_member) {
1212 			ol_txrx_update_group_credit(&pdev->txq_grps[i],
1213 						    credit, absolute);
1214 			break;
1215 		}
1216 	}
1217 	ol_tx_update_group_credit_stats(pdev);
1218 }
1219 
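/**
 * ol_tx_deduct_one_any_group_credit() - deduct one credit from one of the
 * tx queue groups, preferring the group with the larger credit balance
 * @pdev: physical device instance
 *
 * Return: None
 */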
1220 void ol_tx_deduct_one_any_group_credit(ol_txrx_pdev_handle pdev)
1221 {
1222 	int credits_group_0, credits_group_1;
1223 
1224 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
1225 	credits_group_0 = qdf_atomic_read(&pdev->txq_grps[0].credit);
1226 	credits_group_1 = qdf_atomic_read(&pdev->txq_grps[1].credit);
1227 
1228 	if (credits_group_0 > credits_group_1)
1229 		ol_txrx_update_group_credit(&pdev->txq_grps[0], -1, 0);
1230 	else if (credits_group_1 != 0)
1231 		ol_txrx_update_group_credit(&pdev->txq_grps[1], -1, 0);
1232 
1233 	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
1234 }
1235 
1236 #ifdef DEBUG_HL_LOGGING
1237 
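/**
 * ol_tx_update_group_credit_stats() - record the current membership and
 * credit of each tx queue group in the circular group-credit stats log
 * @pdev: physical device instance
 *
 * Return: None
 */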
1238 void ol_tx_update_group_credit_stats(ol_txrx_pdev_handle pdev)
1239 {
1240 	uint16_t curr_index;
1241 	uint8_t i;
1242 
1243 	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
1244 	pdev->grp_stats.last_valid_index++;
1245 	if (pdev->grp_stats.last_valid_index > (OL_TX_GROUP_STATS_LOG_SIZE
1246 				- 1)) {
1247 		pdev->grp_stats.last_valid_index -= OL_TX_GROUP_STATS_LOG_SIZE;
1248 		pdev->grp_stats.wrap_around = 1;
1249 	}
1250 	curr_index = pdev->grp_stats.last_valid_index;
1251 
1252 	for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
1253 		pdev->grp_stats.stats[curr_index].grp[i].member_vdevs =
1254 			OL_TXQ_GROUP_VDEV_ID_MASK_GET(
1255 					pdev->txq_grps[i].membership);
1256 		pdev->grp_stats.stats[curr_index].grp[i].credit =
1257 			qdf_atomic_read(&pdev->txq_grps[i].credit);
1258 	}
1259 
1260 	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
1261 }
1262 
1263 void ol_tx_dump_group_credit_stats(ol_txrx_pdev_handle pdev)
1264 {
1265 	uint16_t i, j, is_break = 0;
1266 	int16_t curr_index, old_index, wrap_around;
1267 	uint16_t curr_credit, mem_vdevs;
1268 	uint16_t old_credit = 0;
1269 
1270 	txrx_nofl_info("Group credit stats:");
1271 	txrx_nofl_info("  No: GrpID: Credit: Change: vdev_map");
1272 
1273 	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
1274 	curr_index = pdev->grp_stats.last_valid_index;
1275 	wrap_around = pdev->grp_stats.wrap_around;
1276 	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
1277 
1278 	if (curr_index < 0) {
1279 		txrx_nofl_info("Not initialized");
1280 		return;
1281 	}
1282 
1283 	for (i = 0; i < OL_TX_GROUP_STATS_LOG_SIZE; i++) {
1284 		old_index = curr_index - 1;
1285 		if (old_index < 0) {
1286 			if (wrap_around == 0)
1287 				is_break = 1;
1288 			else
1289 				old_index = OL_TX_GROUP_STATS_LOG_SIZE - 1;
1290 		}
1291 
1292 		for (j = 0; j < OL_TX_MAX_TXQ_GROUPS; j++) {
1293 			qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
1294 			curr_credit =
1295 				pdev->grp_stats.stats[curr_index].
1296 								grp[j].credit;
1297 			if (!is_break)
1298 				old_credit =
1299 					pdev->grp_stats.stats[old_index].
1300 								grp[j].credit;
1301 
1302 			mem_vdevs =
1303 				pdev->grp_stats.stats[curr_index].grp[j].
1304 								member_vdevs;
1305 			qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
1306 
1307 			if (!is_break)
1308 				txrx_nofl_info("%4d: %5d: %6d %6d %8x",
1309 					       curr_index, j,
1310 					       curr_credit,
1311 					       (curr_credit - old_credit),
1312 					       mem_vdevs);
1313 			else
1314 				txrx_nofl_info("%4d: %5d: %6d %6s %8x",
1315 					       curr_index, j,
1316 					       curr_credit, "NA", mem_vdevs);
1317 		}
1318 
1319 		if (is_break)
1320 			break;
1321 
1322 		curr_index = old_index;
1323 	}
1324 }
1325 
1326 void ol_tx_clear_group_credit_stats(ol_txrx_pdev_handle pdev)
1327 {
1328 	qdf_spin_lock_bh(&pdev->grp_stat_spinlock);
1329 	qdf_mem_zero(&pdev->grp_stats, sizeof(pdev->grp_stats));
1330 	pdev->grp_stats.last_valid_index = -1;
1331 	pdev->grp_stats.wrap_around = 0;
1332 	qdf_spin_unlock_bh(&pdev->grp_stat_spinlock);
1333 }
1334 #endif
1335 #endif
1336 
1337 /*
1338  * ol_tx_single_completion_handler performs the same tx completion
1339  * processing as ol_tx_completion_handler, but for a single frame.
1340  * ol_tx_completion_handler is optimized to handle batch completions
1341  * as efficiently as possible; in contrast ol_tx_single_completion_handler
1342  * handles single frames as simply and generally as possible.
1343  * Thus, this ol_tx_single_completion_handler function is suitable for
1344  * intermittent usage, such as for tx mgmt frames.
1345  */
1346 void
1347 ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
1348 				enum htt_tx_status status, uint16_t tx_desc_id)
1349 {
1350 	struct ol_tx_desc_t *tx_desc;
1351 	qdf_nbuf_t netbuf;
1352 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1353 
1354 	tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
1355 	if (!tx_desc) {
1356 		ol_txrx_err("invalid desc_id(%u), ignore it", tx_desc_id);
1357 		return;
1358 	}
1359 
1360 	tx_desc->status = status;
1361 	netbuf = tx_desc->netbuf;
1362 
1363 	QDF_NBUF_UPDATE_TX_PKT_COUNT(netbuf, QDF_NBUF_TX_PKT_FREE);
1364 	/* Do one shot statistics */
1365 	TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, qdf_nbuf_len(netbuf));
1366 
1367 	ol_tx_send_pktlog(soc, pdev, tx_desc, netbuf, status, QDF_TX_MGMT_PKT);
1368 
1369 	if (OL_TX_DESC_NO_REFS(tx_desc)) {
1370 		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
1371 					     status != htt_tx_status_ok);
1372 	}
1373 
1374 	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
1375 			      qdf_atomic_read(&pdev->target_tx_credit),
1376 			      1, qdf_atomic_read(&pdev->target_tx_credit) + 1);
1377 
1378 	if (pdev->cfg.is_high_latency) {
1379 		/*
1380 		 * Credit was already explicitly updated by HTT,
1381 		 * but update the number of available tx descriptors,
1382 		 * then invoke the scheduler, since new credit is probably
1383 		 * available now.
1384 		 */
1385 		qdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
1386 		ol_tx_sched(pdev);
1387 	} else {
1388 		qdf_atomic_add(1, &pdev->target_tx_credit);
1389 	}
1390 }
1391 
1392 /**
1393  * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
1394  * ol_tx_completion_handler().
1395  * Any change in ol_tx_completion_handler() must be mirrored here.
1396  */
1397 void
1398 ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
1399 		      int num_msdus, void *tx_desc_id_iterator)
1400 {
1401 	uint16_t vdev_id, i;
1402 	struct ol_txrx_vdev_t *vdev;
1403 	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
1404 	uint16_t tx_desc_id;
1405 	struct ol_tx_desc_t *tx_desc;
1406 	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
1407 	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
1408 	qdf_nbuf_t netbuf;
1409 	ol_tx_desc_list tx_descs;
1410 	uint32_t is_tx_desc_freed = 0;
1411 
1412 	TAILQ_INIT(&tx_descs);
1413 
1414 	for (i = 0; i < num_msdus; i++) {
1415 		tx_desc_id = desc_ids[i];
1416 		if (tx_desc_id >= pdev->tx_desc.pool_size) {
1417 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1418 			"%s: drop due to invalid msdu id = %x\n",
1419 			__func__, tx_desc_id);
1420 			continue;
1421 		}
1422 		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
1423 		qdf_assert(tx_desc);
1424 		ol_tx_desc_update_comp_ts(tx_desc);
1425 		netbuf = tx_desc->netbuf;
1426 
1427 		/* find the "vdev" this tx_desc belongs to */
1428 		vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
1429 						    (tx_desc->htt_tx_desc)));
1430 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1431 			if (vdev->vdev_id == vdev_id)
1432 				break;
1433 		}
1434 
1435 		/* vdev now points to the vdev for this descriptor. */
1436 
1437 #ifndef ATH_11AC_TXCOMPACT
1438 		/* save this multicast packet to local free list */
1439 		if (qdf_atomic_dec_and_test(&tx_desc->ref_cnt))
1440 #endif
1441 		{
1442 			/*
1443 			 * For this function only, force htt status to be
1444 			 * "htt_tx_status_ok"
1445 			 * for graceful freeing of this multicast frame
1446 			 */
1447 			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
1448 					    lcl_freelist, tx_desc_last,
1449 					    htt_tx_status_ok,
1450 					    is_tx_desc_freed);
1451 #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
1452 			if (!is_tx_desc_freed) {
1453 				tx_desc->pkt_type = ol_tx_frm_freed;
1454 #ifdef QCA_COMPUTE_TX_DELAY
1455 				tx_desc->entry_timestamp_ticks = 0xffffffff;
1456 #endif
1457 			}
1458 #endif
1459 		}
1460 	}
1461 
1462 	if (lcl_freelist) {
1463 		qdf_spin_lock(&pdev->tx_mutex);
1464 		tx_desc_last->next = pdev->tx_desc.freelist;
1465 		pdev->tx_desc.freelist = lcl_freelist;
1466 		qdf_spin_unlock(&pdev->tx_mutex);
1467 	} else {
1468 		ol_tx_desc_frame_list_free(pdev, &tx_descs,
1469 					   htt_tx_status_discard);
1470 	}
1471 	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
1472 			      qdf_atomic_read(&pdev->target_tx_credit),
1473 			      num_msdus,
1474 			      qdf_atomic_read(&pdev->target_tx_credit) +
1475 			      num_msdus);
1476 
1477 	if (pdev->cfg.is_high_latency) {
1478 		/* credit was already explicitly updated by HTT */
1479 		ol_tx_sched(pdev);
1480 	} else {
1481 		ol_tx_target_credit_adjust(num_msdus, pdev, NULL);
1482 	}
1483 }
1484 
1485 #ifdef QCA_COMPUTE_TX_DELAY
1486 /**
1487  * ol_tx_set_compute_interval() - update the compute interval
1488  *				 period for TSM stats.
1489  * @soc_hdl: Datapath soc handle
1490  * @pdev_id: id of data path pdev handle
1491  * @interval: interval for stats computation
1492  *
1493  * Return: None
1494  */
1495 void ol_tx_set_compute_interval(struct cdp_soc_t *soc_hdl,
1496 				uint8_t pdev_id, uint32_t interval)
1497 {
1498 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1499 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1500 
1501 	if (!pdev) {
1502 		ol_txrx_err("pdev is NULL");
1503 		return;
1504 	}
1505 
1506 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
1507 }
1508 
1509 /**
1510  * ol_tx_packet_count() - return the uplink (transmitted) packet count
1511  *			  and loss count
1512  * @soc_hdl: soc handle
1513  * @pdev_id: pdev identifier
1514  * @out_packet_count: number of packets transmitted
1515  * @out_packet_loss_count: number of packets lost
1516  * @category: access category of interest
1517  *
1518  * This function will be called to get the uplink packet count and
1519  * loss count for a given stream (access category) at a regular interval.
1520  * It also resets the counters; hence, the values returned are the packets
1521  * counted in the last 5 second (default) interval. These counters are
1522  * incremented per access category in ol_tx_completion_handler().
1523  */
1524 void
1525 ol_tx_packet_count(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1526 		   uint16_t *out_packet_count,
1527 		   uint16_t *out_packet_loss_count, int category)
1528 {
1529 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1530 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1531 
1532 	if (!pdev) {
1533 		ol_txrx_err("pdev is NULL");
1534 		return;
1535 	}
1536 
1537 	*out_packet_count = pdev->packet_count[category];
1538 	*out_packet_loss_count = pdev->packet_loss_count[category];
1539 	pdev->packet_count[category] = 0;
1540 	pdev->packet_loss_count[category] = 0;
1541 }
1542 
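/**
 * ol_tx_delay_avg() - compute a rounded average without a 64-bit divide
 * @sum: sum of the delay samples, in ticks
 * @num: number of samples
 *
 * Return: average delay in ticks, rounded to the nearest tick
 */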
1543 static uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
1544 {
1545 	uint32_t sum32;
1546 	int shift = 0;
1547 	/*
1548 	 * To avoid doing a 64-bit divide, shift the sum down until it is
1549 	 * no more than 32 bits (and shift the denominator to match).
1550 	 */
1551 	while ((sum >> 32) != 0) {
1552 		sum >>= 1;
1553 		shift++;
1554 	}
1555 	sum32 = (uint32_t) sum;
1556 	num >>= shift;
1557 	return (sum32 + (num >> 1)) / num;      /* round to nearest */
1558 }
1559 
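/**
 * ol_tx_delay() - report the average tx queuing and transmit delays
 * @soc_hdl: datapath soc handle
 * @pdev_id: ID of the physical device
 * @queue_delay_microsec: average queuing delay (output)
 * @tx_delay_microsec: average transmit delay (output)
 * @category: access category of interest
 *
 * Return: None
 */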
1560 void
1561 ol_tx_delay(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1562 	    uint32_t *queue_delay_microsec,
1563 	    uint32_t *tx_delay_microsec, int category)
1564 {
1565 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1566 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1567 	int index;
1568 	uint32_t avg_delay_ticks;
1569 	struct ol_tx_delay_data *data;
1570 
1571 	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);
1572 
1573 	if (!pdev) {
1574 		ol_txrx_err("pdev is NULL");
1575 		return;
1576 	}
1577 
1578 	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
1579 	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;
1580 
1581 	data = &pdev->tx_delay.cats[category].copies[index];
1582 
1583 	if (data->avgs.transmit_num > 0) {
1584 		avg_delay_ticks =
1585 			ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
1586 					data->avgs.transmit_num);
1587 		*tx_delay_microsec =
1588 			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
1589 	} else {
1590 		/*
1591 		 * This case should only happen if there's a query
1592 		 * within 5 sec after the first tx data frame.
1593 		 */
1594 		*tx_delay_microsec = 0;
1595 	}
1596 	if (data->avgs.queue_num > 0) {
1597 		avg_delay_ticks =
1598 			ol_tx_delay_avg(data->avgs.queue_sum_ticks,
1599 					data->avgs.queue_num);
1600 		*queue_delay_microsec =
1601 			qdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
1602 	} else {
1603 		/*
1604 		 * This case should only happen if there's a query
1605 		 * within 5 sec after the first tx data frame.
1606 		 */
1607 		*queue_delay_microsec = 0;
1608 	}
1609 
1610 	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
1611 }
1612 
1613 void
1614 ol_tx_delay_hist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1615 		 uint16_t *report_bin_values, int category)
1616 {
1617 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1618 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1619 	int index, i, j;
1620 	struct ol_tx_delay_data *data;
1621 
1622 	qdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);
1623 
1624 	if (!pdev) {
1625 		ol_txrx_err("pdev is NULL");
1626 		return;
1627 	}
1628 
1629 	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
1630 	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;
1631 
1632 	data = &pdev->tx_delay.cats[category].copies[index];
1633 
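	/*
	 * Aggregate the internal bins into exponentially wider report bins:
	 * report bin 0 covers internal bin 0, bin 1 covers internal bin 1,
	 * bin 2 covers internal bins 2-3, bin 3 covers bins 4-7, and so on,
	 * with the final report bin taken from the overflow bin.
	 */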
1634 	for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
1635 		uint16_t internal_bin_sum = 0;
1636 
1637 		while (j < (1 << i))
1638 			internal_bin_sum += data->hist_bins_queue[j++];
1639 
1640 		report_bin_values[i] = internal_bin_sum;
1641 	}
1642 	report_bin_values[i] = data->hist_bins_queue[j];        /* overflow */
1643 
1644 	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
1645 }
1646 
1647 #ifdef QCA_COMPUTE_TX_DELAY_PER_TID
1648 static uint8_t
1649 ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
1650 			    qdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
1651 {
1652 	uint16_t ethertype;
1653 	uint8_t *dest_addr, *l3_hdr;
1654 	int is_mgmt, is_mcast;
1655 	int l2_hdr_size;
1656 
1657 	dest_addr = ol_tx_dest_addr_find(pdev, msdu);
1658 	if (!dest_addr)
1659 		return QDF_NBUF_TX_EXT_TID_INVALID;
1660 
1661 	is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
1662 	is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
1663 	if (is_mgmt) {
1664 		return (is_mcast) ?
1665 		       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
1666 		       HTT_TX_EXT_TID_MGMT;
1667 	}
1668 	if (is_mcast)
1669 		return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;
1670 
1671 	if (pdev->frame_format == wlan_frm_fmt_802_3) {
1672 		struct ethernet_hdr_t *enet_hdr;
1673 
1674 		enet_hdr = (struct ethernet_hdr_t *)qdf_nbuf_data(msdu);
1675 		l2_hdr_size = sizeof(struct ethernet_hdr_t);
1676 		ethertype =
1677 			(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
1678 		if (!IS_ETHERTYPE(ethertype)) {
1679 			struct llc_snap_hdr_t *llc_hdr;
1680 
1681 			llc_hdr = (struct llc_snap_hdr_t *)
1682 				  (qdf_nbuf_data(msdu) + l2_hdr_size);
1683 			l2_hdr_size += sizeof(struct llc_snap_hdr_t);
1684 			ethertype =
1685 				(llc_hdr->ethertype[0] << 8) |
1686 				llc_hdr->ethertype[1];
1687 		}
1688 	} else {
1689 		struct llc_snap_hdr_t *llc_hdr;
1690 
1691 		l2_hdr_size = sizeof(struct ieee80211_frame);
1692 		llc_hdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(msdu)
1693 						    + l2_hdr_size);
1694 		l2_hdr_size += sizeof(struct llc_snap_hdr_t);
1695 		ethertype =
1696 			(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
1697 	}
1698 	l3_hdr = qdf_nbuf_data(msdu) + l2_hdr_size;
1699 	if (ETHERTYPE_IPV4 == ethertype) {
1700 		return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
1701 	} else if (ETHERTYPE_IPV6 == ethertype) {
1702 		return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
1703 		       0x7;
1704 	} else {
1705 		return QDF_NBUF_TX_EXT_TID_INVALID;
1706 	}
1707 }
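
/*
 * Worked example (illustrative, not from the driver): an IPv4 frame with
 * TOS 0xB8 (DSCP EF, typically voice) yields (0xB8 >> 5) & 0x7 = 5, so the
 * delay statistics for that MSDU are accounted under TID 5.
 */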
1708 
1709 static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
1710 {
1711 	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
1712 	uint8_t tid;
1713 	qdf_nbuf_t msdu = tx_desc->netbuf;
1714 
1715 	tid = qdf_nbuf_get_tid(msdu);
1716 	if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
1717 		tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
1718 		if (tid == QDF_NBUF_TX_EXT_TID_INVALID) {
1719 			/*
1720 			 * TID could not be determined
1721 			 * (this is not an IP frame?)
1722 			 */
1723 			return -EINVAL;
1724 		}
1725 	}
1726 	return tid;
1727 }
1728 #else
1729 static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
1730 {
1731 	return 0;
1732 }
1733 #endif
1734 
1735 static inline int
1736 ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
1737 {
1738 	int bin;
1739 	/*
1740 	 * For speed, multiply and shift to approximate a divide. This causes
1741 	 * a small error, but the approximation error should be much less
1742 	 * than the other uncertainties in the tx delay computation.
1743 	 */
1744 	bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
1745 	      pdev->tx_delay.hist_internal_bin_width_shift;
1746 	if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
1747 		bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;
1748 
1749 	return bin;
1750 }
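
/*
 * Worked example (illustrative numbers, not from the driver): to
 * approximate a divide by a bin width of 10 ticks, the setup code could
 * pick mult = 26 and shift = 8, since 26 / 2^8 = 0.1016 is close to 1/10.
 * A delay of 100 ticks then maps to (100 * 26) >> 8 = 10, the same bin an
 * exact divide would give, with only a small error near bin boundaries.
 */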
1751 
1752 static void
1753 ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
1754 		    enum htt_tx_status status,
1755 		    uint16_t *desc_ids, int num_msdus)
1756 {
1757 	int i, index, cat;
1758 	uint32_t now_ticks = qdf_system_ticks();
1759 	uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
1760 	uint32_t avg_time_ticks;
1761 	struct ol_tx_delay_data *data;
1762 
1763 	qdf_assert(num_msdus > 0);
1764 
1765 	/*
1766 	 * Keep running counters of total and lost packets; they are reset
1767 	 * in ol_tx_packet_count(), the function used to fetch these stats.
1768 	 */
1769 
1770 	cat = ol_tx_delay_category(pdev, desc_ids[0]);
1771 	if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
1772 		return;
1773 
1774 	pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
1775 	if (status != htt_tx_status_ok) {
1776 		for (i = 0; i < num_msdus; i++) {
1777 			cat = ol_tx_delay_category(pdev, desc_ids[i]);
1778 			if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
1779 				return;
1780 			pdev->packet_loss_count[cat]++;
1781 		}
1782 		return;
1783 	}
1784 
1785 	/* since we may switch the ping-pong index, provide mutex w. readers */
1786 	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
1787 	index = pdev->tx_delay.cats[cat].in_progress_idx;
1788 
1789 	data = &pdev->tx_delay.cats[cat].copies[index];
1790 
1791 	if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
1792 		tx_delay_transmit_ticks =
1793 			now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
1794 		/*
1795 		 * We'd like to account for the number of MSDUs that were
1796 		 * transmitted together, but we don't know this.  All we know
1797 		 * is the number of MSDUs that were acked together.
1798 		 * Since the frame error rate is small, this is nearly the same
1799 		 * as the number of frames transmitted together.
1800 		 */
1801 		data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
1802 		data->avgs.transmit_num += num_msdus;
1803 	}
1804 	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;
1805 
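	/*
	 * Per-MSDU queue delay: time from when the descriptor entered the
	 * tx queue (entry_timestamp_ticks) until this completion.
	 */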
1806 	for (i = 0; i < num_msdus; i++) {
1807 		int bin;
1808 		uint16_t id = desc_ids[i];
1809 		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);
1810 
1811 		tx_delay_queue_ticks =
1812 			now_ticks - tx_desc->entry_timestamp_ticks;
1813 
1814 		data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
1815 		data->avgs.queue_num++;
1816 		bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
1817 		data->hist_bins_queue[bin]++;
1818 	}
1819 
1820 	/* check if it's time to start a new average */
1821 	avg_time_ticks =
1822 		now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
1823 	if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
1824 		pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
1825 		index = 1 - index;
1826 		pdev->tx_delay.cats[cat].in_progress_idx = index;
1827 		qdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
1828 			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
1829 	}
1830 
1831 	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
1832 }
1833 
1834 #endif /* QCA_COMPUTE_TX_DELAY */
1835 
1836 #ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
1837 void ol_register_timestamp_callback(tp_ol_timestamp_cb ol_tx_timestamp_cb)
1838 {
1839 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1840 	ol_txrx_pdev_handle pdev;
1841 
1842 	if (qdf_unlikely(!soc))
1843 		return;
1844 
1845 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
1846 	if (!pdev) {
1847 		ol_txrx_err("pdev is NULL");
1848 		return;
1849 	}
1850 	pdev->ol_tx_timestamp_cb = ol_tx_timestamp_cb;
1851 }
1852 
1853 void ol_deregister_timestamp_callback(void)
1854 {
1855 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
1856 	ol_txrx_pdev_handle pdev;
1857 
	if (qdf_unlikely(!soc))
		return;

1858 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
1859 	if (!pdev) {
1860 		ol_txrx_err("pdev is NULL");
1861 		return;
1862 	}
1863 	pdev->ol_tx_timestamp_cb = NULL;
1864 }
1865 #endif
1866