/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "cdp_txrx_cmn_reg.h"
#ifdef CONFIG_SAWF
#include <dp_sawf.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

#define DP_RETRY_COUNT 7
#ifdef WLAN_PEER_JITTER
#define DP_AVG_JITTER_WEIGHT_DENOM 4
#define DP_AVG_DELAY_WEIGHT_DENOM 3
#endif

#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif
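
/*
 * Illustrative sketch (not driver code): the DP_TX_TCL_METADATA_* wrappers
 * above let callers compose TCL metadata without caring whether the target
 * uses the v1 or v2 HTT layout. A typical peer-based setup, assuming a
 * hypothetical local variable 'tcl_metadata' and an already-known peer_id,
 * might look like:
 *
 *	uint16_t tcl_metadata = 0;
 *
 *	DP_TX_TCL_METADATA_TYPE_SET(tcl_metadata,
 *				    DP_TCL_METADATA_TYPE_PEER_BASED);
 *	DP_TX_TCL_METADATA_PEER_ID_SET(tcl_metadata, peer_id);
 *
 * With QCA_DP_TX_FW_METADATA_V2 defined this expands to the
 * HTT_TX_TCL_METADATA_V2_* setters, otherwise to the legacy ones.
 */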

#define DP_GET_HW_LINK_ID_FRM_PPDU_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))

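/*
 * Worked example (illustrative values, not taken from any spec): with
 * LINK_ID_OFFSET = 8 and LINK_ID_BITS = 3 the macro extracts bits [10:8]
 * of the PPDU ID:
 *
 *	DP_GET_HW_LINK_ID_FRM_PPDU_ID(0x0705, 8, 3)
 *		= (0x0705 >> 8) & ((1 << 3) - 1)
 *		= 0x07 & 0x7
 *		= 7
 */
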
/* Mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					  HAL_TX_ENCRYPT_TYPE_WEP_128,
					  HAL_TX_ENCRYPT_TYPE_WEP_104,
					  HAL_TX_ENCRYPT_TYPE_WEP_40,
					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					  HAL_TX_ENCRYPT_TYPE_WAPI,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
qdf_export_symbol(sec_type_map);

#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	enum dp_tx_event_type type;

	if (flags & DP_TX_DESC_FLAG_FLUSH)
		type = DP_TX_DESC_FLUSH;
	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
		type = DP_TX_COMP_UNMAP_ERR;
	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
		type = DP_TX_COMP_UNMAP;
	else
		type = DP_TX_DESC_UNMAP;

	return type;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
	struct dp_tx_desc_event *entry;
	uint32_t idx;
	uint16_t slot;

	switch (type) {
	case DP_TX_COMP_UNMAP:
	case DP_TX_COMP_UNMAP_ERR:
	case DP_TX_COMP_MSDU_EXT:
		if (qdf_unlikely(!tx_comp_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
						 &slot,
						 DP_TX_COMP_HIST_SLOT_SHIFT,
						 DP_TX_COMP_HIST_PER_SLOT_MAX,
						 DP_TX_COMP_HISTORY_SIZE);
		entry = &tx_comp_history->entry[slot][idx];
		break;
	case DP_TX_DESC_MAP:
	case DP_TX_DESC_UNMAP:
	case DP_TX_DESC_COOKIE:
	case DP_TX_DESC_FLUSH:
		if (qdf_unlikely(!tx_tcl_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
						 &slot,
						 DP_TX_TCL_HIST_SLOT_SHIFT,
						 DP_TX_TCL_HIST_PER_SLOT_MAX,
						 DP_TX_TCL_HISTORY_SIZE);
		entry = &tx_tcl_history->entry[slot][idx];
		break;
	default:
		dp_info_rl("Invalid dp_tx_event_type: %d", type);
		return;
	}

	entry->skb = skb;
	entry->paddr = paddr;
	entry->sw_cookie = sw_cookie;
	entry->type = type;
	entry->ts = qdf_get_log_timestamp();
}
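
/*
 * Illustrative call sketch (not driver code; the descriptor field names
 * 'dma_addr', 'nbuf' and 'id' are assumptions for the example): a
 * completion-path caller would typically derive the event type from the
 * descriptor flags and record the nbuf/paddr/cookie triple:
 *
 *	type = dp_tx_get_event_type(tx_desc->flags);
 *	dp_tx_desc_history_add(soc, tx_desc->dma_addr, tx_desc->nbuf,
 *			       tx_desc->id, type);
 *
 * The entry lands in either tx_comp_history or tx_tcl_history depending on
 * the event type; each is a slotted ring whose index is advanced atomically,
 * so concurrent writers never share an entry.
 */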

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
	int i;

	for (i = 1; i < tso_seg->seg.num_frags; i++) {
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
				       skb, sw_cookie, type);
	}

	if (!tso_seg->next)
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
				       skb, 0xFFFFFFFF, type);
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
	uint32_t num_segs = tso_info.num_segs;

	while (num_segs) {
		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
		curr_seg = curr_seg->next;
		num_segs--;
	}
}

#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */

/**
 * dp_is_tput_high() - Check if throughput is high
 *
 * @soc: core txrx main context
 *
 * The current function is based on the RTPM tput policy variable, where RTPM
 * is avoided based on throughput.
 *
 * Return: non-zero if throughput is high, 0 otherwise
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
	return dp_get_rtpm_tput_policy_requirement(soc);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;
		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
					true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}
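
/*
 * Usage note (descriptive, restating the logic above): this function is
 * called once per segment while tso_cmn_num_seg counts down. Only the call
 * that observes tso_cmn_num_seg == 1 passes is_last_seg = true, which lets
 * qdf_nbuf_unmap_tso_segment() tear down the DMA mapping shared by all
 * segments of the jumbo MSDU.
 */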

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
		dp_tx_err("TSO desc is NULL!");
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
		dp_tx_err("TSO num desc is NULL!");
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->
				msdu_ext_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->msdu_ext_desc->
					    tso_num_desc);
			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
				    tso_desc);
		tx_desc->msdu_ext_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif

#ifdef WLAN_SUPPORT_PPEDS
static inline int
dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
			 uint8_t desc_pool_id)
{
	if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS) {
		__dp_tx_outstanding_dec(soc);
		dp_tx_desc_free(soc, tx_desc, desc_pool_id);

		return 1;
	}

	return 0;
}
#else
static inline int
dp_tx_release_ds_tx_desc(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
			 uint8_t desc_pool_id)
{
	return 0;
}
#endif

void
dp_tx_desc_release(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		   uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	uint8_t comp_status = 0;

	if (dp_tx_release_ds_tx_desc(soc, tx_desc, desc_pool_id))
		return;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->msdu_ext_desc) {
		if (tx_desc->frm_type == dp_tx_frm_tso)
			dp_tx_tso_desc_release(soc, tx_desc);

		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
			dp_tx_me_free_buf(tx_desc->pdev,
					  tx_desc->msdu_ext_desc->me_buffer);

		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

		tx_desc->msdu_ext_desc = NULL;
	}

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
				tx_desc->buffer_src)
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	if (soc->dp_debug_log_en) {
		dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
			    tx_desc->id, comp_status,
			    qdf_atomic_read(&pdev->num_tx_outstanding));
	}

	if (tx_desc->flags & DP_TX_DESC_FLAG_SPECIAL)
		dp_tx_spcl_desc_free(soc, tx_desc, desc_pool_id);
	else
		dp_tx_desc_free(soc, tx_desc, desc_pool_id);
}
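
/*
 * Release-path summary (descriptive comment, not new logic): a PPE-DS
 * descriptor is freed immediately by the fast path above; otherwise the
 * function drops the outstanding count, tears down any MSDU extension state
 * (TSO descriptors, ME buffer), balances the exception counter for
 * FW-routed frames, derives the completion status (TQM release reason vs.
 * FW release), and finally returns the descriptor to its special or regular
 * pool.
 */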

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
				(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0]) ||
	    msdu_info->exception_fw) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does, so no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i[msdu_info->xmit_type].dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] qdf_nbuf_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			dp_tx_err("Error in filling HTT metadata");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);

	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* TODO - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
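
/*
 * Worked example for the headroom math above (illustrative size, the real
 * sizeof is target-dependent): the HTT meta header is pushed in front of the
 * frame rounded up to an 8-byte multiple, so if
 * sizeof(struct htt_tx_msdu_desc_ext2_t) were 42, then
 *
 *	htt_desc_size_aligned = (42 + 7) & ~0x7 = 48
 *
 * and qdf_nbuf_push_head() would prepend 48 bytes, of which only the first
 * 42 are filled from msdu_info->meta_data.
 */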

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *)&tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
		tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
			tso_seg->tso_frags[num_frag].length);
	}
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
		void *ext_desc)
{
}
#endif
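
/*
 * Descriptive sketch of the extension descriptor contents (restating the
 * function above): for a TSO segment the descriptor carries everything the
 * HW needs to emit one TCP segment on its own - the TSO-enable/TCP-flags
 * word, L2/IP lengths, the per-segment TCP sequence number and IP ID, plus
 * one {paddr, length} buffer entry per fragment. Checksum-enable bits
 * deliberately live in the TCL descriptor instead, as the in-function
 * comment notes.
 */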

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments and
 *                              do dma unmap for each segment
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: stats index in the range [0, CDP_MAX_TSO_PACKETS)
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
						% CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
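
/*
 * Worked example (illustrative; the actual CDP_MAX_TSO_PACKETS value is
 * config-defined): dp_tso_get_stats_idx() hands out stats slots in a
 * circular fashion. If CDP_MAX_TSO_PACKETS were 16, the 16th TSO packet
 * would wrap back to slot 0:
 *
 *	qdf_atomic_inc_return(&pdev->tso_idx) -> 16,  16 % 16 -> 0
 *	qdf_atomic_inc_return(&pdev->tso_idx) -> 17,  17 % 16 -> 1
 */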

/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *				     free the tso segment descriptors and
 *				     tso num segment descriptors
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
					tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso num segment descriptors (typically there is only 1) */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
			msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);

		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
			msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has been done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);

		return QDF_STATUS_E_INVAL;
	}

	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
			      msdu, 0, DP_TX_DESC_MAP);

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
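
/*
 * Illustrative reading of the assert above: msdu_info->meta_data is an array
 * of DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit words (4 bytes each), so the
 * check guarantees that casts of the form
 *
 *	struct htt_tx_msdu_desc_ext2_t *desc_ext =
 *		(struct htt_tx_msdu_desc_ext2_t *)msdu_info->meta_data;
 *
 * (used throughout this file) can never read or write past the end of that
 * array.
 */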

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to the prepared extension descriptor, or NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev,
			     tx_i[msdu_info->xmit_type].dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
			qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
				&msdu_info->meta_data[0],
				sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
				seg_info->frags[i].paddr_lo,
				seg_info->frags[i].paddr_hi,
				seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
				&cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
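
/*
 * Call-flow sketch (illustrative, simplified; not a verbatim caller): a
 * scatter-gather or TSO transmit path would chain the returned element into
 * its tx descriptor, e.g.
 *
 *	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
 *	if (msdu_ext_desc)
 *		tx_desc->msdu_ext_desc = msdu_ext_desc;
 *
 * Note the descriptor is composed in the on-stack cached_ext_desc[] scratch
 * buffer first and only written out to DMA-visible memory by the final
 * hal_tx_ext_desc_sync() call.
 */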

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 * @soc: datapath SOC
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 * @op_mode: Vdev Operation mode
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id, enum QDF_OPMODE op_mode)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id, enum QDF_OPMODE op_mode)
{
	if (dp_is_tput_high(soc))
		return;

	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0,
				 op_mode));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID,
			     op_mode);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *				      an exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Return: 1 if the packet is marked as an exception,
 *	   0 if the packet is not marked as an exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return 0;
}
#endif

#ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare a packet to
 *                                          send to fw as an indication that
 *                                          the data stream has ended
 * @vdev: DP vdev handle
 * @nbuf: original buffer from network stack
 *
 * Return: NULL on failure,
 *         nbuf on success
 */
static inline qdf_nbuf_t
dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
				     qdf_nbuf_t nbuf)
{
	/* Packet length should be enough to copy up to the L3 header */
	uint8_t end_nbuf_len = 64;
	uint8_t htt_desc_size_aligned;
	uint8_t htt_desc_size;
	qdf_nbuf_t end_nbuf;

	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
		if (!end_nbuf) {
			end_nbuf = qdf_nbuf_alloc(NULL,
						  (htt_desc_size_aligned +
						  end_nbuf_len),
						  htt_desc_size_aligned,
						  8, false);
			if (!end_nbuf) {
				dp_err("Packet allocation failed");
				goto out;
			}
		} else {
			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
		}
		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
			     end_nbuf_len);
		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);

		return end_nbuf;
	}
out:
	return NULL;
}

/**
 * dp_tx_send_traffic_end_indication_pkt() - Send the indication packet to FW
 *                                           via the exception path.
 * @vdev: DP vdev handle
 * @end_nbuf: skb to send as indication
 * @msdu_info: msdu_info of original nbuf
 * @peer_id: peer id
 *
 * Return: None
 */
static inline void
dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
				      qdf_nbuf_t end_nbuf,
				      struct dp_tx_msdu_info_s *msdu_info,
				      uint16_t peer_id)
{
	struct dp_tx_msdu_info_s e_msdu_info = {0};
	qdf_nbuf_t nbuf;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);

	e_msdu_info.tx_queue = msdu_info->tx_queue;
	e_msdu_info.tid = msdu_info->tid;
	e_msdu_info.exception_fw = 1;
	e_msdu_info.xmit_type = msdu_info->xmit_type;
	desc_ext->host_tx_desc_pool = 1;
	desc_ext->traffic_end_indication = 1;
	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
				      peer_id, NULL);
	/* dp_tx_send_msdu_single() hands the nbuf back only on failure */
	if (nbuf) {
		dp_err("Traffic end indication packet tx failed");
		qdf_nbuf_free(nbuf);
	}
}

/**
 * dp_tx_traffic_end_indication_set_desc_flag() - Set the tx descriptor flag
 *                                                to mark it as a traffic end
 *                                                indication packet.
 * @tx_desc: Tx descriptor pointer
 * @msdu_info: msdu_info structure pointer
 *
 * Return: None
 */
static inline void
dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
					   struct dp_tx_msdu_info_s *msdu_info)
{
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);

	if (qdf_unlikely(desc_ext->traffic_end_indication))
		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
}

/**
 * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet, instead
 *                                              of freeing it, when it is
 *                                              associated with a traffic end
 *                                              indication flagged descriptor.
 * @soc: dp soc handle
 * @desc: Tx descriptor pointer
 * @nbuf: buffer pointer
 *
 * Return: True if the packet gets enqueued, else false
 */
static bool
dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
					 struct dp_tx_desc_s *desc,
					 qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely((desc->flags &
			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
					     DP_MOD_ID_TX_COMP);
		if (vdev) {
			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
			return true;
		}
	}
	return false;
}
1063*5113495bSYour Name 
1064*5113495bSYour Name /**
1065*5113495bSYour Name  * dp_tx_traffic_end_indication_is_enabled() - get the feature
1066*5113495bSYour Name  *                                             enable/disable status
1067*5113495bSYour Name  * @vdev: dp vdev handle
1068*5113495bSYour Name  *
1069*5113495bSYour Name  * Return: True if the feature is enabled, else false
1070*5113495bSYour Name  */
1071*5113495bSYour Name static inline bool
1072*5113495bSYour Name dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1073*5113495bSYour Name {
1074*5113495bSYour Name 	return qdf_unlikely(vdev->traffic_end_ind_en);
1075*5113495bSYour Name }
1076*5113495bSYour Name 
1077*5113495bSYour Name static inline qdf_nbuf_t
1078*5113495bSYour Name dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1079*5113495bSYour Name 			       struct dp_tx_msdu_info_s *msdu_info,
1080*5113495bSYour Name 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1081*5113495bSYour Name {
1082*5113495bSYour Name 	if (dp_tx_traffic_end_indication_is_enabled(vdev))
1083*5113495bSYour Name 		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
1084*5113495bSYour Name 
1085*5113495bSYour Name 	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1086*5113495bSYour Name 
1087*5113495bSYour Name 	if (qdf_unlikely(end_nbuf))
1088*5113495bSYour Name 		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
1089*5113495bSYour Name 						      msdu_info, peer_id);
1090*5113495bSYour Name 	return nbuf;
1091*5113495bSYour Name }
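/*
 * Illustrative flow (a sketch, not an additional code path): when traffic
 * end indication is enabled on the vdev, the wrapper above first obtains a
 * separate "end" nbuf via dp_tx_get_traffic_end_indication_pkt(), transmits
 * the data nbuf, and then raises the end nbuf to the FW as an exception
 * frame so the target can tell that the burst has completed.
 */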
1092*5113495bSYour Name #else
1093*5113495bSYour Name static inline qdf_nbuf_t
1094*5113495bSYour Name dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
1095*5113495bSYour Name 				     qdf_nbuf_t nbuf)
1096*5113495bSYour Name {
1097*5113495bSYour Name 	return NULL;
1098*5113495bSYour Name }
1099*5113495bSYour Name 
1100*5113495bSYour Name static inline void
1101*5113495bSYour Name dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
1102*5113495bSYour Name 				      qdf_nbuf_t end_nbuf,
1103*5113495bSYour Name 				      struct dp_tx_msdu_info_s *msdu_info,
1104*5113495bSYour Name 				      uint16_t peer_id)
1105*5113495bSYour Name {}
1106*5113495bSYour Name 
1107*5113495bSYour Name static inline void
1108*5113495bSYour Name dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
1109*5113495bSYour Name 					   struct dp_tx_msdu_info_s *msdu_info)
1110*5113495bSYour Name {}
1111*5113495bSYour Name 
1112*5113495bSYour Name static inline bool
1113*5113495bSYour Name dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
1114*5113495bSYour Name 					 struct dp_tx_desc_s *desc,
1115*5113495bSYour Name 					 qdf_nbuf_t nbuf)
1116*5113495bSYour Name {
1117*5113495bSYour Name 	return false;
1118*5113495bSYour Name }
1119*5113495bSYour Name 
1120*5113495bSYour Name static inline bool
1121*5113495bSYour Name dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
1122*5113495bSYour Name {
1123*5113495bSYour Name 	return false;
1124*5113495bSYour Name }
1125*5113495bSYour Name 
1126*5113495bSYour Name static inline qdf_nbuf_t
1127*5113495bSYour Name dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1128*5113495bSYour Name 			       struct dp_tx_msdu_info_s *msdu_info,
1129*5113495bSYour Name 			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
1130*5113495bSYour Name {
1131*5113495bSYour Name 	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
1132*5113495bSYour Name }
1133*5113495bSYour Name #endif
1134*5113495bSYour Name 
1135*5113495bSYour Name #if defined(QCA_SUPPORT_WDS_EXTENDED)
1136*5113495bSYour Name static bool
1137*5113495bSYour Name dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1138*5113495bSYour Name 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1139*5113495bSYour Name {
1140*5113495bSYour Name 	if (soc->features.wds_ext_ast_override_enable &&
1141*5113495bSYour Name 	    tx_exc_metadata && tx_exc_metadata->is_wds_extended)
1142*5113495bSYour Name 		return true;
1143*5113495bSYour Name 
1144*5113495bSYour Name 	return false;
1145*5113495bSYour Name }
1146*5113495bSYour Name #else
1147*5113495bSYour Name static bool
1148*5113495bSYour Name dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
1149*5113495bSYour Name 			     struct cdp_tx_exception_metadata *tx_exc_metadata)
1150*5113495bSYour Name {
1151*5113495bSYour Name 	return false;
1152*5113495bSYour Name }
1153*5113495bSYour Name #endif
1154*5113495bSYour Name 
1155*5113495bSYour Name /**
1156*5113495bSYour Name  * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
1157*5113495bSYour Name  * @vdev: DP vdev handle
1158*5113495bSYour Name  * @nbuf: skb
1159*5113495bSYour Name  * @desc_pool_id: Descriptor pool ID
1160*5113495bSYour Name  * @msdu_info: Metadata to the fw
1161*5113495bSYour Name  * @tx_exc_metadata: Handle that holds exception path metadata
1162*5113495bSYour Name  *
1163*5113495bSYour Name  * Allocate and prepare Tx descriptor with msdu information.
1164*5113495bSYour Name  *
1165*5113495bSYour Name  * Return: Pointer to Tx Descriptor on success,
1166*5113495bSYour Name  *         NULL on failure
1167*5113495bSYour Name  */
1168*5113495bSYour Name static
1169*5113495bSYour Name struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
1170*5113495bSYour Name 		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
1171*5113495bSYour Name 		struct dp_tx_msdu_info_s *msdu_info,
1172*5113495bSYour Name 		struct cdp_tx_exception_metadata *tx_exc_metadata)
1173*5113495bSYour Name {
1174*5113495bSYour Name 	uint8_t align_pad;
1175*5113495bSYour Name 	uint8_t is_exception = 0;
1176*5113495bSYour Name 	uint8_t htt_hdr_size;
1177*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc;
1178*5113495bSYour Name 	struct dp_pdev *pdev = vdev->pdev;
1179*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
1180*5113495bSYour Name 	uint8_t xmit_type = msdu_info->xmit_type;
1181*5113495bSYour Name 
1182*5113495bSYour Name 	if (dp_tx_limit_check(vdev, nbuf))
1183*5113495bSYour Name 		return NULL;
1184*5113495bSYour Name 
1185*5113495bSYour Name 	/* Allocate software Tx descriptor */
1186*5113495bSYour Name 	if (nbuf->protocol == QDF_NBUF_TRAC_EAPOL_ETH_TYPE)
1187*5113495bSYour Name 		tx_desc = dp_tx_spcl_desc_alloc(soc, desc_pool_id);
1188*5113495bSYour Name 	else
1189*5113495bSYour Name 		tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1190*5113495bSYour Name 
1191*5113495bSYour Name 	if (qdf_unlikely(!tx_desc)) {
1192*5113495bSYour Name 		DP_STATS_INC(vdev,
1193*5113495bSYour Name 			     tx_i[xmit_type].dropped.desc_na.num, 1);
1194*5113495bSYour Name 		DP_STATS_INC(vdev,
1195*5113495bSYour Name 			     tx_i[xmit_type].dropped.desc_na_exc_alloc_fail.num,
1196*5113495bSYour Name 			     1);
1197*5113495bSYour Name 		return NULL;
1198*5113495bSYour Name 	}
1199*5113495bSYour Name 
1200*5113495bSYour Name 	dp_tx_outstanding_inc(pdev);
1201*5113495bSYour Name 
1202*5113495bSYour Name 	/* Initialize the SW tx descriptor */
1203*5113495bSYour Name 	tx_desc->nbuf = nbuf;
1204*5113495bSYour Name 	tx_desc->frm_type = dp_tx_frm_std;
1205*5113495bSYour Name 	tx_desc->tx_encap_type = ((tx_exc_metadata &&
1206*5113495bSYour Name 		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
1207*5113495bSYour Name 		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
1208*5113495bSYour Name 	tx_desc->vdev_id = vdev->vdev_id;
1209*5113495bSYour Name 	tx_desc->pdev = pdev;
1210*5113495bSYour Name 	tx_desc->msdu_ext_desc = NULL;
1211*5113495bSYour Name 	tx_desc->pkt_offset = 0;
1212*5113495bSYour Name 	tx_desc->length = qdf_nbuf_headlen(nbuf);
1213*5113495bSYour Name 
1214*5113495bSYour Name 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id,
1215*5113495bSYour Name 			vdev->qdf_opmode);
1216*5113495bSYour Name 
1217*5113495bSYour Name 	if (qdf_unlikely(vdev->multipass_en)) {
1218*5113495bSYour Name 		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
1219*5113495bSYour Name 			goto failure;
1220*5113495bSYour Name 	}
1221*5113495bSYour Name 
1222*5113495bSYour Name 	/* Packets marked by upper layer (OS-IF) to be sent to FW */
1223*5113495bSYour Name 	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
1224*5113495bSYour Name 		is_exception = 1;
1225*5113495bSYour Name 
1226*5113495bSYour Name 	/* For BE chipsets, if WDS extension is enabled, do not mark the FW
1227*5113495bSYour Name 	 * exception in the descriptor; mark AST-index-based search instead.
1228*5113495bSYour Name 	 */
1229*5113495bSYour Name 	if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
1230*5113495bSYour Name 		return tx_desc;
1231*5113495bSYour Name 
1232*5113495bSYour Name 	/*
1233*5113495bSYour Name 	 * For special modes (vdev_type == ocb or mesh), data frames should be
1234*5113495bSYour Name 	 * transmitted using varying transmit parameters (tx spec) which include
1235*5113495bSYour Name 	 * transmit rate, power, priority, channel, channel bandwidth, NSS, etc.
1236*5113495bSYour Name 	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
1237*5113495bSYour Name 	 * These frames are sent as exception packets to firmware.
1238*5113495bSYour Name 	 *
1239*5113495bSYour Name 	 * HW requirement is that metadata should always point to a
1240*5113495bSYour Name 	 * 8-byte aligned address. So we add alignment pad to start of buffer.
1241*5113495bSYour Name 	 *  HTT Metadata should be ensured to be multiple of 8-bytes,
1242*5113495bSYour Name 	 *  to get 8-byte aligned start address along with align_pad added
1243*5113495bSYour Name 	 *
1244*5113495bSYour Name 	 *  |-----------------------------|
1245*5113495bSYour Name 	 *  |                             |
1246*5113495bSYour Name 	 *  |-----------------------------| <-----Buffer Pointer Address given
1247*5113495bSYour Name 	 *  |                             |  ^    in HW descriptor (aligned)
1248*5113495bSYour Name 	 *  |       HTT Metadata          |  |
1249*5113495bSYour Name 	 *  |                             |  |
1250*5113495bSYour Name 	 *  |                             |  | Packet Offset given in descriptor
1251*5113495bSYour Name 	 *  |                             |  |
1252*5113495bSYour Name 	 *  |-----------------------------|  |
1253*5113495bSYour Name 	 *  |       Alignment Pad         |  v
1254*5113495bSYour Name 	 *  |-----------------------------| <----- Actual buffer start address
1255*5113495bSYour Name 	 *  |        SKB Data             |           (Unaligned)
1256*5113495bSYour Name 	 *  |                             |
1257*5113495bSYour Name 	 *  |                             |
1258*5113495bSYour Name 	 *  |                             |
1259*5113495bSYour Name 	 *  |                             |
1260*5113495bSYour Name 	 *  |                             |
1261*5113495bSYour Name 	 *  |-----------------------------|
1262*5113495bSYour Name 	 */
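	/*
	 * Worked example (illustrative numbers): if qdf_nbuf_data(nbuf)
	 * returns an address ending in 0x6, align_pad = 0x6 & 0x7 = 6;
	 * pushing the head by 6 bytes gives an 8-byte aligned start. With
	 * a hypothetical HTT metadata size of 24 bytes, pkt_offset becomes
	 * 6 + 24 = 30, which the HW uses to skip to the actual payload.
	 */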
1263*5113495bSYour Name 	if (qdf_unlikely((msdu_info->exception_fw)) ||
1264*5113495bSYour Name 				(vdev->opmode == wlan_op_mode_ocb) ||
1265*5113495bSYour Name 				(tx_exc_metadata &&
1266*5113495bSYour Name 				tx_exc_metadata->is_tx_sniffer)) {
1267*5113495bSYour Name 		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
1268*5113495bSYour Name 
1269*5113495bSYour Name 		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
1270*5113495bSYour Name 			DP_STATS_INC(vdev,
1271*5113495bSYour Name 				     tx_i[xmit_type].dropped.headroom_insufficient,
1272*5113495bSYour Name 				     1);
1273*5113495bSYour Name 			goto failure;
1274*5113495bSYour Name 		}
1275*5113495bSYour Name 
1276*5113495bSYour Name 		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
1277*5113495bSYour Name 			dp_tx_err("qdf_nbuf_push_head failed");
1278*5113495bSYour Name 			goto failure;
1279*5113495bSYour Name 		}
1280*5113495bSYour Name 
1281*5113495bSYour Name 		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
1282*5113495bSYour Name 				msdu_info);
1283*5113495bSYour Name 		if (htt_hdr_size == 0)
1284*5113495bSYour Name 			goto failure;
1285*5113495bSYour Name 
1286*5113495bSYour Name 		tx_desc->length = qdf_nbuf_headlen(nbuf);
1287*5113495bSYour Name 		tx_desc->pkt_offset = align_pad + htt_hdr_size;
1288*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1289*5113495bSYour Name 		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
1290*5113495bSYour Name 							   msdu_info);
1291*5113495bSYour Name 		is_exception = 1;
1292*5113495bSYour Name 		tx_desc->length -= tx_desc->pkt_offset;
1293*5113495bSYour Name 	}
1294*5113495bSYour Name 
1295*5113495bSYour Name #if !TQM_BYPASS_WAR
1296*5113495bSYour Name 	if (is_exception || tx_exc_metadata)
1297*5113495bSYour Name #endif
1298*5113495bSYour Name 	{
1299*5113495bSYour Name 		/* Temporary WAR due to TQM VP issues */
1300*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1301*5113495bSYour Name 		qdf_atomic_inc(&soc->num_tx_exception);
1302*5113495bSYour Name 	}
1303*5113495bSYour Name 
1304*5113495bSYour Name 	return tx_desc;
1305*5113495bSYour Name 
1306*5113495bSYour Name failure:
1307*5113495bSYour Name 	dp_tx_desc_release(soc, tx_desc, desc_pool_id);
1308*5113495bSYour Name 	return NULL;
1309*5113495bSYour Name }
1310*5113495bSYour Name 
1311*5113495bSYour Name /**
1312*5113495bSYour Name  * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
1313*5113495bSYour Name  *                        frame
1314*5113495bSYour Name  * @vdev: DP vdev handle
1315*5113495bSYour Name  * @nbuf: skb
1316*5113495bSYour Name  * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
1317*5113495bSYour Name  * @desc_pool_id : Descriptor Pool ID
1318*5113495bSYour Name  *
1319*5113495bSYour Name  * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1320*5113495bSYour Name  * information. For frames with fragments, allocate and prepare
1321*5113495bSYour Name  * an MSDU extension descriptor
1322*5113495bSYour Name  *
1323*5113495bSYour Name  * Return: Pointer to Tx Descriptor on success,
1324*5113495bSYour Name  *         NULL on failure
1325*5113495bSYour Name  */
1326*5113495bSYour Name static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
1327*5113495bSYour Name 		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
1328*5113495bSYour Name 		uint8_t desc_pool_id)
1329*5113495bSYour Name {
1330*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc;
1331*5113495bSYour Name 	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
1332*5113495bSYour Name 	struct dp_pdev *pdev = vdev->pdev;
1333*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
1334*5113495bSYour Name 
1335*5113495bSYour Name 	if (dp_tx_limit_check(vdev, nbuf))
1336*5113495bSYour Name 		return NULL;
1337*5113495bSYour Name 
1338*5113495bSYour Name 	/* Allocate software Tx descriptor */
1339*5113495bSYour Name 	if (nbuf->protocol == QDF_NBUF_TRAC_EAPOL_ETH_TYPE)
1340*5113495bSYour Name 		tx_desc = dp_tx_spcl_desc_alloc(soc, desc_pool_id);
1341*5113495bSYour Name 	else
1342*5113495bSYour Name 		tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
1343*5113495bSYour Name 
1344*5113495bSYour Name 	if (!tx_desc) {
1345*5113495bSYour Name 		DP_STATS_INC(vdev,
1346*5113495bSYour Name 			     tx_i[msdu_info->xmit_type].dropped.desc_na.num, 1);
1347*5113495bSYour Name 		return NULL;
1348*5113495bSYour Name 	}
1349*5113495bSYour Name 	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
1350*5113495bSYour Name 				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
1351*5113495bSYour Name 
1352*5113495bSYour Name 	dp_tx_outstanding_inc(pdev);
1353*5113495bSYour Name 
1354*5113495bSYour Name 	/* Initialize the SW tx descriptor */
1355*5113495bSYour Name 	tx_desc->nbuf = nbuf;
1356*5113495bSYour Name 	tx_desc->frm_type = msdu_info->frm_type;
1357*5113495bSYour Name 	tx_desc->tx_encap_type = vdev->tx_encap_type;
1358*5113495bSYour Name 	tx_desc->vdev_id = vdev->vdev_id;
1359*5113495bSYour Name 	tx_desc->pdev = pdev;
1360*5113495bSYour Name 	tx_desc->pkt_offset = 0;
1361*5113495bSYour Name 
1362*5113495bSYour Name 	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id,
1363*5113495bSYour Name 			vdev->qdf_opmode);
1364*5113495bSYour Name 
1365*5113495bSYour Name 	/* Handle scattered frames - TSO/SG/ME */
1366*5113495bSYour Name 	/* Allocate and prepare an extension descriptor for scattered frames */
1367*5113495bSYour Name 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
1368*5113495bSYour Name 	if (!msdu_ext_desc) {
1369*5113495bSYour Name 		dp_tx_info("Tx Extension Descriptor Alloc Fail");
1370*5113495bSYour Name 		goto failure;
1371*5113495bSYour Name 	}
1372*5113495bSYour Name 
1373*5113495bSYour Name #if !TQM_BYPASS_WAR
1374*5113495bSYour Name 	if (qdf_unlikely(msdu_info->exception_fw) ||
1375*5113495bSYour Name 	    dp_tx_is_nbuf_marked_exception(soc, nbuf))
1376*5113495bSYour Name #endif
1377*5113495bSYour Name 	{
1378*5113495bSYour Name 		/* Temporary WAR due to TQM VP issues */
1379*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
1380*5113495bSYour Name 		qdf_atomic_inc(&soc->num_tx_exception);
1381*5113495bSYour Name 	}
1382*5113495bSYour Name 
1383*5113495bSYour Name 
1384*5113495bSYour Name 	tx_desc->msdu_ext_desc = msdu_ext_desc;
1385*5113495bSYour Name 	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
1386*5113495bSYour Name 
1387*5113495bSYour Name 	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
1388*5113495bSYour Name 	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
1389*5113495bSYour Name 
1390*5113495bSYour Name 	tx_desc->dma_addr = msdu_ext_desc->paddr;
1391*5113495bSYour Name 
1392*5113495bSYour Name 	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
1393*5113495bSYour Name 		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
1394*5113495bSYour Name 	else
1395*5113495bSYour Name 		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
1396*5113495bSYour Name 
1397*5113495bSYour Name 	return tx_desc;
1398*5113495bSYour Name failure:
1399*5113495bSYour Name 	dp_tx_desc_release(soc, tx_desc, desc_pool_id);
1400*5113495bSYour Name 	return NULL;
1401*5113495bSYour Name }
1402*5113495bSYour Name 
1403*5113495bSYour Name /**
1404*5113495bSYour Name  * dp_tx_prepare_raw() - Prepare RAW packet TX
1405*5113495bSYour Name  * @vdev: DP vdev handle
1406*5113495bSYour Name  * @nbuf: buffer pointer
1407*5113495bSYour Name  * @seg_info: Pointer to Segment info Descriptor to be prepared
1408*5113495bSYour Name  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
1409*5113495bSYour Name  *     descriptor
1410*5113495bSYour Name  *
1411*5113495bSYour Name  * Return: nbuf on success, NULL on failure
1412*5113495bSYour Name  */
1413*5113495bSYour Name static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1414*5113495bSYour Name 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
1415*5113495bSYour Name {
1416*5113495bSYour Name 	qdf_nbuf_t curr_nbuf = NULL;
1417*5113495bSYour Name 	uint16_t total_len = 0;
1418*5113495bSYour Name 	qdf_dma_addr_t paddr;
1419*5113495bSYour Name 	int32_t i;
1420*5113495bSYour Name 	int32_t mapped_buf_num = 0;
1421*5113495bSYour Name 
1422*5113495bSYour Name 	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
1423*5113495bSYour Name 	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
1424*5113495bSYour Name 
1425*5113495bSYour Name 	DP_STATS_INC_PKT(vdev, tx_i[msdu_info->xmit_type].raw.raw_pkt,
1426*5113495bSYour Name 			 1, qdf_nbuf_len(nbuf));
1427*5113495bSYour Name 
1428*5113495bSYour Name 	/* Continue only if frames are of DATA type */
1429*5113495bSYour Name 	if (!DP_FRAME_IS_DATA(qos_wh)) {
1430*5113495bSYour Name 		DP_STATS_INC(vdev,
1431*5113495bSYour Name 			     tx_i[msdu_info->xmit_type].raw.invalid_raw_pkt_datatype,
1432*5113495bSYour Name 			     1);
1433*5113495bSYour Name 		dp_tx_debug("Pkt recd is not of data type");
1434*5113495bSYour Name 		goto error;
1435*5113495bSYour Name 	}
1436*5113495bSYour Name 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
1437*5113495bSYour Name 	if (vdev->raw_mode_war &&
1438*5113495bSYour Name 	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
1439*5113495bSYour Name 	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
1440*5113495bSYour Name 		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
1441*5113495bSYour Name 
1442*5113495bSYour Name 	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
1443*5113495bSYour Name 			curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
1444*5113495bSYour Name 		/*
1445*5113495bSYour Name 		 * Number of nbuf's must not exceed the size of the frags
1446*5113495bSYour Name 		 * array in seg_info.
1447*5113495bSYour Name 		 */
1448*5113495bSYour Name 		if (i >= DP_TX_MAX_NUM_FRAGS) {
1449*5113495bSYour Name 			dp_err_rl("nbuf cnt exceeds the max number of segs");
1450*5113495bSYour Name 			DP_STATS_INC(vdev,
1451*5113495bSYour Name 				     tx_i[msdu_info->xmit_type].raw.num_frags_overflow_err,
1452*5113495bSYour Name 				     1);
1453*5113495bSYour Name 			goto error;
1454*5113495bSYour Name 		}
1455*5113495bSYour Name 		if (QDF_STATUS_SUCCESS !=
1456*5113495bSYour Name 			qdf_nbuf_map_nbytes_single(vdev->osdev,
1457*5113495bSYour Name 						   curr_nbuf,
1458*5113495bSYour Name 						   QDF_DMA_TO_DEVICE,
1459*5113495bSYour Name 						   curr_nbuf->len)) {
1460*5113495bSYour Name 			dp_tx_err("%s dma map error", __func__);
1461*5113495bSYour Name 			DP_STATS_INC(vdev,
1462*5113495bSYour Name 				     tx_i[msdu_info->xmit_type].raw.dma_map_error,
1463*5113495bSYour Name 				     1);
1464*5113495bSYour Name 			goto error;
1465*5113495bSYour Name 		}
1466*5113495bSYour Name 		/* Update the count of mapped nbuf's */
1467*5113495bSYour Name 		mapped_buf_num++;
1468*5113495bSYour Name 		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
1469*5113495bSYour Name 		seg_info->frags[i].paddr_lo = paddr;
1470*5113495bSYour Name 		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
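		/* e.g. (illustrative): for paddr 0x123456780, paddr_lo holds
		 * the truncated low word 0x23456780 and paddr_hi holds 0x1.
		 */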
1471*5113495bSYour Name 		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
1472*5113495bSYour Name 		seg_info->frags[i].vaddr = (void *) curr_nbuf;
1473*5113495bSYour Name 		total_len += qdf_nbuf_len(curr_nbuf);
1474*5113495bSYour Name 	}
1475*5113495bSYour Name 
1476*5113495bSYour Name 	seg_info->frag_cnt = i;
1477*5113495bSYour Name 	seg_info->total_len = total_len;
1478*5113495bSYour Name 	seg_info->next = NULL;
1479*5113495bSYour Name 
1480*5113495bSYour Name 	sg_info->curr_seg = seg_info;
1481*5113495bSYour Name 
1482*5113495bSYour Name 	msdu_info->frm_type = dp_tx_frm_raw;
1483*5113495bSYour Name 	msdu_info->num_seg = 1;
1484*5113495bSYour Name 
1485*5113495bSYour Name 	return nbuf;
1486*5113495bSYour Name 
1487*5113495bSYour Name error:
1488*5113495bSYour Name 	i = 0;
1489*5113495bSYour Name 	while (nbuf) {
1490*5113495bSYour Name 		curr_nbuf = nbuf;
1491*5113495bSYour Name 		if (i < mapped_buf_num) {
1492*5113495bSYour Name 			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
1493*5113495bSYour Name 						     QDF_DMA_TO_DEVICE,
1494*5113495bSYour Name 						     curr_nbuf->len);
1495*5113495bSYour Name 			i++;
1496*5113495bSYour Name 		}
1497*5113495bSYour Name 		nbuf = qdf_nbuf_next(nbuf);
1498*5113495bSYour Name 		qdf_nbuf_free(curr_nbuf);
1499*5113495bSYour Name 	}
1500*5113495bSYour Name 	return NULL;
1501*5113495bSYour Name 
1502*5113495bSYour Name }
1503*5113495bSYour Name 
1504*5113495bSYour Name /**
1505*5113495bSYour Name  * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
1506*5113495bSYour Name  * @soc: DP soc handle
1507*5113495bSYour Name  * @nbuf: Buffer pointer
1508*5113495bSYour Name  *
1509*5113495bSYour Name  * unmap the chain of nbufs that belong to this RAW frame.
1510*5113495bSYour Name  *
1511*5113495bSYour Name  * Return: None
1512*5113495bSYour Name  */
1513*5113495bSYour Name static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
1514*5113495bSYour Name 				    qdf_nbuf_t nbuf)
1515*5113495bSYour Name {
1516*5113495bSYour Name 	qdf_nbuf_t cur_nbuf = nbuf;
1517*5113495bSYour Name 
1518*5113495bSYour Name 	do {
1519*5113495bSYour Name 		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
1520*5113495bSYour Name 					     QDF_DMA_TO_DEVICE,
1521*5113495bSYour Name 					     cur_nbuf->len);
1522*5113495bSYour Name 		cur_nbuf = qdf_nbuf_next(cur_nbuf);
1523*5113495bSYour Name 	} while (cur_nbuf);
1524*5113495bSYour Name }
1525*5113495bSYour Name 
1526*5113495bSYour Name #ifdef VDEV_PEER_PROTOCOL_COUNT
1527*5113495bSYour Name void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
1528*5113495bSYour Name 					       qdf_nbuf_t nbuf)
1529*5113495bSYour Name {
1530*5113495bSYour Name 	qdf_nbuf_t nbuf_local;
1531*5113495bSYour Name 	struct dp_vdev *vdev_local = vdev_hdl;
1532*5113495bSYour Name 
1533*5113495bSYour Name 	do {
1534*5113495bSYour Name 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
1535*5113495bSYour Name 			break;
1536*5113495bSYour Name 		nbuf_local = nbuf;
1537*5113495bSYour Name 		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
1538*5113495bSYour Name 			 htt_cmn_pkt_type_raw))
1539*5113495bSYour Name 			break;
1540*5113495bSYour Name 		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
1541*5113495bSYour Name 			break;
1542*5113495bSYour Name 		else if (qdf_nbuf_is_tso((nbuf_local)))
1543*5113495bSYour Name 			break;
1544*5113495bSYour Name 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
1545*5113495bSYour Name 						       (nbuf_local),
1546*5113495bSYour Name 						       NULL, 1, 0);
1547*5113495bSYour Name 	} while (0);
1548*5113495bSYour Name }
1549*5113495bSYour Name #endif
1550*5113495bSYour Name 
1551*5113495bSYour Name #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1552*5113495bSYour Name void dp_tx_update_stats(struct dp_soc *soc,
1553*5113495bSYour Name 			struct dp_tx_desc_s *tx_desc,
1554*5113495bSYour Name 			uint8_t ring_id)
1555*5113495bSYour Name {
1556*5113495bSYour Name 	uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);
1557*5113495bSYour Name 
1558*5113495bSYour Name 	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
1559*5113495bSYour Name }
1560*5113495bSYour Name 
1561*5113495bSYour Name int
1562*5113495bSYour Name dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1563*5113495bSYour Name 			 struct dp_tx_desc_s *tx_desc,
1564*5113495bSYour Name 			 uint8_t tid,
1565*5113495bSYour Name 			 struct dp_tx_msdu_info_s *msdu_info,
1566*5113495bSYour Name 			 uint8_t ring_id)
1567*5113495bSYour Name {
1568*5113495bSYour Name 	struct dp_swlm *swlm = &soc->swlm;
1569*5113495bSYour Name 	union swlm_data swlm_query_data;
1570*5113495bSYour Name 	struct dp_swlm_tcl_data tcl_data;
1571*5113495bSYour Name 	QDF_STATUS status;
1572*5113495bSYour Name 	int ret;
1573*5113495bSYour Name 
1574*5113495bSYour Name 	if (!swlm->is_enabled)
1575*5113495bSYour Name 		return msdu_info->skip_hp_update;
1576*5113495bSYour Name 
1577*5113495bSYour Name 	tcl_data.nbuf = tx_desc->nbuf;
1578*5113495bSYour Name 	tcl_data.tid = tid;
1579*5113495bSYour Name 	tcl_data.ring_id = ring_id;
1580*5113495bSYour Name 	tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
1581*5113495bSYour Name 	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
1582*5113495bSYour Name 	swlm_query_data.tcl_data = &tcl_data;
1583*5113495bSYour Name 
1584*5113495bSYour Name 	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
1585*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
1586*5113495bSYour Name 		dp_swlm_tcl_reset_session_data(soc, ring_id);
1587*5113495bSYour Name 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1588*5113495bSYour Name 		return 0;
1589*5113495bSYour Name 	}
1590*5113495bSYour Name 
1591*5113495bSYour Name 	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
1592*5113495bSYour Name 	if (ret) {
1593*5113495bSYour Name 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
1594*5113495bSYour Name 	} else {
1595*5113495bSYour Name 		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
1596*5113495bSYour Name 	}
1597*5113495bSYour Name 
1598*5113495bSYour Name 	return ret;
1599*5113495bSYour Name }
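/*
 * Illustrative call pattern (hypothetical caller, shown only as a sketch):
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *					    msdu_info, ring_id);
 *	...program the TCL descriptor...
 *	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
 *
 * A non-zero return defers the TCL head-pointer update so that several
 * back-to-back packets can be flushed with a single register write.
 */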
1600*5113495bSYour Name 
1601*5113495bSYour Name void
1602*5113495bSYour Name dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1603*5113495bSYour Name 		      int coalesce)
1604*5113495bSYour Name {
1605*5113495bSYour Name 	if (coalesce)
1606*5113495bSYour Name 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1607*5113495bSYour Name 	else
1608*5113495bSYour Name 		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1609*5113495bSYour Name }
1610*5113495bSYour Name 
1611*5113495bSYour Name static inline void
1612*5113495bSYour Name dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1613*5113495bSYour Name {
1614*5113495bSYour Name 	if ((i + 1) < msdu_info->num_seg)
1615*5113495bSYour Name 		msdu_info->skip_hp_update = 1;
1616*5113495bSYour Name 	else
1617*5113495bSYour Name 		msdu_info->skip_hp_update = 0;
1618*5113495bSYour Name }
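/*
 * e.g. for a 3-segment MSDU, segments 0 and 1 set skip_hp_update and only
 * the final segment (i == 2) lets the TCL head-pointer update go through.
 */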
1619*5113495bSYour Name 
1620*5113495bSYour Name static inline void
1621*5113495bSYour Name dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
1622*5113495bSYour Name {
1623*5113495bSYour Name 	hal_ring_handle_t hal_ring_hdl =
1624*5113495bSYour Name 		dp_tx_get_hal_ring_hdl(soc, ring_id);
1625*5113495bSYour Name 
1626*5113495bSYour Name 	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
1627*5113495bSYour Name 		dp_err("Fillmore: SRNG access start failed");
1628*5113495bSYour Name 		return;
1629*5113495bSYour Name 	}
1630*5113495bSYour Name 
1631*5113495bSYour Name 	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
1632*5113495bSYour Name }
1633*5113495bSYour Name 
1634*5113495bSYour Name static inline void
1635*5113495bSYour Name dp_tx_check_and_flush_hp(struct dp_soc *soc,
1636*5113495bSYour Name 			 QDF_STATUS status,
1637*5113495bSYour Name 			 struct dp_tx_msdu_info_s *msdu_info)
1638*5113495bSYour Name {
1639*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1640*5113495bSYour Name 		dp_flush_tcp_hp(soc,
1641*5113495bSYour Name 			(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
1642*5113495bSYour Name 	}
1643*5113495bSYour Name }
1644*5113495bSYour Name #else
1645*5113495bSYour Name static inline void
1646*5113495bSYour Name dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
1647*5113495bSYour Name {
1648*5113495bSYour Name }
1649*5113495bSYour Name 
1650*5113495bSYour Name static inline void
1651*5113495bSYour Name dp_tx_check_and_flush_hp(struct dp_soc *soc,
1652*5113495bSYour Name 			 QDF_STATUS status,
1653*5113495bSYour Name 			 struct dp_tx_msdu_info_s *msdu_info)
1654*5113495bSYour Name {
1655*5113495bSYour Name }
1656*5113495bSYour Name #endif
1657*5113495bSYour Name 
1658*5113495bSYour Name #ifdef FEATURE_RUNTIME_PM
1659*5113495bSYour Name void
1660*5113495bSYour Name dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1661*5113495bSYour Name 			      hal_ring_handle_t hal_ring_hdl,
1662*5113495bSYour Name 			      int coalesce)
1663*5113495bSYour Name {
1664*5113495bSYour Name 	int ret;
1665*5113495bSYour Name 
1666*5113495bSYour Name 	/*
1667*5113495bSYour Name 	 * Avoid runtime get and put APIs under high throughput scenarios.
1668*5113495bSYour Name 	 */
1669*5113495bSYour Name 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
1670*5113495bSYour Name 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1671*5113495bSYour Name 		return;
1672*5113495bSYour Name 	}
1673*5113495bSYour Name 
1674*5113495bSYour Name 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
1675*5113495bSYour Name 	if (QDF_IS_STATUS_SUCCESS(ret)) {
1676*5113495bSYour Name 		if (hif_system_pm_state_check(soc->hif_handle)) {
1677*5113495bSYour Name 			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1678*5113495bSYour Name 			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1679*5113495bSYour Name 			hal_srng_inc_flush_cnt(hal_ring_hdl);
1680*5113495bSYour Name 		} else {
1681*5113495bSYour Name 			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1682*5113495bSYour Name 		}
1683*5113495bSYour Name 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
1684*5113495bSYour Name 	} else {
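		/* Runtime get failed (e.g. the target is suspending): defer
		 * the head-pointer write, record a flush event and track the
		 * pending count so the flush can be replayed on resume.
		 */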
1685*5113495bSYour Name 		dp_runtime_get(soc);
1686*5113495bSYour Name 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1687*5113495bSYour Name 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1688*5113495bSYour Name 		qdf_atomic_inc(&soc->tx_pending_rtpm);
1689*5113495bSYour Name 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1690*5113495bSYour Name 		dp_runtime_put(soc);
1691*5113495bSYour Name 	}
1692*5113495bSYour Name }
1693*5113495bSYour Name #else
1694*5113495bSYour Name 
1695*5113495bSYour Name #ifdef DP_POWER_SAVE
1696*5113495bSYour Name void
1697*5113495bSYour Name dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1698*5113495bSYour Name 			      hal_ring_handle_t hal_ring_hdl,
1699*5113495bSYour Name 			      int coalesce)
1700*5113495bSYour Name {
1701*5113495bSYour Name 	if (hif_system_pm_state_check(soc->hif_handle)) {
1702*5113495bSYour Name 		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
1703*5113495bSYour Name 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
1704*5113495bSYour Name 		hal_srng_inc_flush_cnt(hal_ring_hdl);
1705*5113495bSYour Name 	} else {
1706*5113495bSYour Name 		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1707*5113495bSYour Name 	}
1708*5113495bSYour Name }
1709*5113495bSYour Name #endif
1710*5113495bSYour Name #endif
1711*5113495bSYour Name 
1712*5113495bSYour Name /**
1713*5113495bSYour Name  * dp_tx_get_tid() - Obtain TID to be used for this frame
1714*5113495bSYour Name  * @vdev: DP vdev handle
1715*5113495bSYour Name  * @nbuf: skb
1716*5113495bSYour Name  * @msdu_info: msdu descriptor
1717*5113495bSYour Name  *
1718*5113495bSYour Name  * Extract the DSCP or PCP information from frame and map into TID value.
1719*5113495bSYour Name  *
1720*5113495bSYour Name  * Return: void
1721*5113495bSYour Name  */
1722*5113495bSYour Name static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1723*5113495bSYour Name 			  struct dp_tx_msdu_info_s *msdu_info)
1724*5113495bSYour Name {
1725*5113495bSYour Name 	uint8_t tos = 0, dscp_tid_override = 0;
1726*5113495bSYour Name 	uint8_t *hdr_ptr, *L3datap;
1727*5113495bSYour Name 	uint8_t is_mcast = 0;
1728*5113495bSYour Name 	qdf_ether_header_t *eh = NULL;
1729*5113495bSYour Name 	qdf_ethervlan_header_t *evh = NULL;
1730*5113495bSYour Name 	uint16_t   ether_type;
1731*5113495bSYour Name 	qdf_llc_t *llcHdr;
1732*5113495bSYour Name 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
1733*5113495bSYour Name 
1734*5113495bSYour Name 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1735*5113495bSYour Name 	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1736*5113495bSYour Name 		eh = (qdf_ether_header_t *)nbuf->data;
1737*5113495bSYour Name 		hdr_ptr = (uint8_t *)(eh->ether_dhost);
1738*5113495bSYour Name 		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
1739*5113495bSYour Name 	} else {
1740*5113495bSYour Name 		qdf_dot3_qosframe_t *qos_wh =
1741*5113495bSYour Name 			(qdf_dot3_qosframe_t *) nbuf->data;
1742*5113495bSYour Name 		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
1743*5113495bSYour Name 			qos_wh->i_qos[0] & DP_QOS_TID : 0;
1744*5113495bSYour Name 		return;
1745*5113495bSYour Name 	}
1746*5113495bSYour Name 
1747*5113495bSYour Name 	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
1748*5113495bSYour Name 	ether_type = eh->ether_type;
1749*5113495bSYour Name 
1750*5113495bSYour Name 	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
1751*5113495bSYour Name 	/*
1752*5113495bSYour Name 	 * Check if packet is dot3 or eth2 type.
1753*5113495bSYour Name 	 */
1754*5113495bSYour Name 	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1755*5113495bSYour Name 		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1756*5113495bSYour Name 				sizeof(*llcHdr));
1757*5113495bSYour Name 
1758*5113495bSYour Name 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1759*5113495bSYour Name 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
1760*5113495bSYour Name 				sizeof(*llcHdr);
1761*5113495bSYour Name 			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1762*5113495bSYour Name 					+ sizeof(*llcHdr) +
1763*5113495bSYour Name 					sizeof(qdf_net_vlanhdr_t));
1764*5113495bSYour Name 		} else {
1765*5113495bSYour Name 			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
1766*5113495bSYour Name 				sizeof(*llcHdr);
1767*5113495bSYour Name 		}
1768*5113495bSYour Name 	} else {
1769*5113495bSYour Name 		if (ether_type == htons(ETHERTYPE_VLAN)) {
1770*5113495bSYour Name 			evh = (qdf_ethervlan_header_t *) eh;
1771*5113495bSYour Name 			ether_type = evh->ether_type;
1772*5113495bSYour Name 			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
1773*5113495bSYour Name 		}
1774*5113495bSYour Name 	}
1775*5113495bSYour Name 
1776*5113495bSYour Name 	/*
1777*5113495bSYour Name 	 * Find priority from IP TOS DSCP field
1778*5113495bSYour Name 	 */
1779*5113495bSYour Name 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
1780*5113495bSYour Name 		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
1781*5113495bSYour Name 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
1782*5113495bSYour Name 			/* Only for unicast frames */
1783*5113495bSYour Name 			if (!is_mcast) {
1784*5113495bSYour Name 				/* send it on VO queue */
1785*5113495bSYour Name 				msdu_info->tid = DP_VO_TID;
1786*5113495bSYour Name 			}
1787*5113495bSYour Name 		} else {
1788*5113495bSYour Name 			/*
1789*5113495bSYour Name 			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
1790*5113495bSYour Name 			 * from TOS byte.
1791*5113495bSYour Name 			 */
1792*5113495bSYour Name 			tos = ip->ip_tos;
1793*5113495bSYour Name 			dscp_tid_override = 1;
1794*5113495bSYour Name 
1795*5113495bSYour Name 		}
1796*5113495bSYour Name 	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1797*5113495bSYour Name 		/* TODO
1798*5113495bSYour Name 		 * use flowlabel
1799*5113495bSYour Name 		 * IGMP/MLD cases to be handled in phase 2
1800*5113495bSYour Name 		 */
1801*5113495bSYour Name 		unsigned long ver_pri_flowlabel;
1802*5113495bSYour Name 		unsigned long pri;
1803*5113495bSYour Name 		ver_pri_flowlabel = *(unsigned long *) L3datap;
1804*5113495bSYour Name 		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
1805*5113495bSYour Name 			DP_IPV6_PRIORITY_SHIFT;
1806*5113495bSYour Name 		tos = pri;
1807*5113495bSYour Name 		dscp_tid_override = 1;
1808*5113495bSYour Name 	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
1809*5113495bSYour Name 		msdu_info->tid = DP_VO_TID;
1810*5113495bSYour Name 	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
1811*5113495bSYour Name 		/* Only for unicast frames */
1812*5113495bSYour Name 		if (!is_mcast) {
1813*5113495bSYour Name 			/* send ucast arp on VO queue */
1814*5113495bSYour Name 			msdu_info->tid = DP_VO_TID;
1815*5113495bSYour Name 		}
1816*5113495bSYour Name 	}
1817*5113495bSYour Name 
1818*5113495bSYour Name 	/*
1819*5113495bSYour Name 	 * Assign all MCAST packets to BE
1820*5113495bSYour Name 	 */
1821*5113495bSYour Name 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
1822*5113495bSYour Name 		if (is_mcast) {
1823*5113495bSYour Name 			tos = 0;
1824*5113495bSYour Name 			dscp_tid_override = 1;
1825*5113495bSYour Name 		}
1826*5113495bSYour Name 	}
1827*5113495bSYour Name 
1828*5113495bSYour Name 	if (dscp_tid_override == 1) {
1829*5113495bSYour Name 		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
1830*5113495bSYour Name 		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
1831*5113495bSYour Name 	}
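	/*
	 * Worked example (assuming DP_IP_DSCP_SHIFT == 2 and
	 * DP_IP_DSCP_MASK == 0x3f): a TOS byte of 0xB8 yields DSCP
	 * (0xB8 >> 2) & 0x3f = 46 (EF), which indexes the configured
	 * dscp_tid_map to pick the TID (typically VO for EF traffic).
	 */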
1832*5113495bSYour Name 
1833*5113495bSYour Name 	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
1834*5113495bSYour Name 		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
1835*5113495bSYour Name 
1836*5113495bSYour Name 	return;
1837*5113495bSYour Name }
1838*5113495bSYour Name 
1839*5113495bSYour Name /**
1840*5113495bSYour Name  * dp_tx_classify_tid() - Obtain TID to be used for this frame
1841*5113495bSYour Name  * @vdev: DP vdev handle
1842*5113495bSYour Name  * @nbuf: skb
1843*5113495bSYour Name  * @msdu_info: msdu descriptor
1844*5113495bSYour Name  *
1845*5113495bSYour Name  * Software based TID classification is required when more than 2 DSCP-TID
1846*5113495bSYour Name  * mapping tables are needed.
1847*5113495bSYour Name  * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
1848*5113495bSYour Name  *
1849*5113495bSYour Name  * Return: void
1850*5113495bSYour Name  */
1851*5113495bSYour Name static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1852*5113495bSYour Name 				      struct dp_tx_msdu_info_s *msdu_info)
1853*5113495bSYour Name {
1854*5113495bSYour Name 	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
1855*5113495bSYour Name 
1856*5113495bSYour Name 	/*
1857*5113495bSYour Name 	 * skip_sw_tid_classification flag will be set in the below cases:
1858*5113495bSYour Name 	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
1859*5113495bSYour Name 	 * 2. hlos_tid_override enabled for vdev
1860*5113495bSYour Name 	 * 3. mesh mode enabled for vdev
1861*5113495bSYour Name 	 */
1862*5113495bSYour Name 	if (qdf_likely(vdev->skip_sw_tid_classification)) {
1863*5113495bSYour Name 		/* Update tid in msdu_info from skb priority */
1864*5113495bSYour Name 		if (qdf_unlikely(vdev->skip_sw_tid_classification
1865*5113495bSYour Name 			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
1866*5113495bSYour Name 			uint32_t tid = qdf_nbuf_get_priority(nbuf);
1867*5113495bSYour Name 
1868*5113495bSYour Name 			if (tid == DP_TX_INVALID_QOS_TAG)
1869*5113495bSYour Name 				return;
1870*5113495bSYour Name 
1871*5113495bSYour Name 			msdu_info->tid = tid;
1872*5113495bSYour Name 			return;
1873*5113495bSYour Name 		}
1874*5113495bSYour Name 		return;
1875*5113495bSYour Name 	}
1876*5113495bSYour Name 
1877*5113495bSYour Name 	dp_tx_get_tid(vdev, nbuf, msdu_info);
1878*5113495bSYour Name }
1879*5113495bSYour Name 
1880*5113495bSYour Name #ifdef FEATURE_WLAN_TDLS
1881*5113495bSYour Name /**
1882*5113495bSYour Name  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
1883*5113495bSYour Name  * @soc: datapath SOC
1884*5113495bSYour Name  * @vdev: datapath vdev
1885*5113495bSYour Name  * @tx_desc: TX descriptor
1886*5113495bSYour Name  *
1887*5113495bSYour Name  * Return: None
1888*5113495bSYour Name  */
1889*5113495bSYour Name static void dp_tx_update_tdls_flags(struct dp_soc *soc,
1890*5113495bSYour Name 				    struct dp_vdev *vdev,
1891*5113495bSYour Name 				    struct dp_tx_desc_s *tx_desc)
1892*5113495bSYour Name {
1893*5113495bSYour Name 	if (vdev) {
1894*5113495bSYour Name 		if (vdev->is_tdls_frame) {
1895*5113495bSYour Name 			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
1896*5113495bSYour Name 			vdev->is_tdls_frame = false;
1897*5113495bSYour Name 		}
1898*5113495bSYour Name 	}
1899*5113495bSYour Name }
1900*5113495bSYour Name 
1901*5113495bSYour Name static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
1902*5113495bSYour Name {
1903*5113495bSYour Name 	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
1904*5113495bSYour Name 
1905*5113495bSYour Name 	switch (soc->arch_id) {
1906*5113495bSYour Name 	case CDP_ARCH_TYPE_LI:
1907*5113495bSYour Name 		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
1908*5113495bSYour Name 		break;
1909*5113495bSYour Name 
1910*5113495bSYour Name 	case CDP_ARCH_TYPE_BE:
1911*5113495bSYour Name 		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
1912*5113495bSYour Name 		break;
1913*5113495bSYour Name 
1914*5113495bSYour Name 	case CDP_ARCH_TYPE_RH:
1915*5113495bSYour Name 		{
1916*5113495bSYour Name 			uint32_t *msg_word = (uint32_t *)htt_desc;
1917*5113495bSYour Name 
1918*5113495bSYour Name 			tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(
1919*5113495bSYour Name 							*(msg_word + 3));
1920*5113495bSYour Name 		}
1921*5113495bSYour Name 		break;
1922*5113495bSYour Name 	default:
1923*5113495bSYour Name 		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
1924*5113495bSYour Name 		QDF_BUG(0);
1925*5113495bSYour Name 	}
1926*5113495bSYour Name 
1927*5113495bSYour Name 	return tx_status;
1928*5113495bSYour Name }
1929*5113495bSYour Name 
1930*5113495bSYour Name /**
1931*5113495bSYour Name  * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
1932*5113495bSYour Name  * @soc: dp_soc handle
1933*5113495bSYour Name  * @tx_desc: TX descriptor
1934*5113495bSYour Name  *
1935*5113495bSYour Name  * Return: None
1936*5113495bSYour Name  */
1937*5113495bSYour Name static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
1938*5113495bSYour Name 					 struct dp_tx_desc_s *tx_desc)
1939*5113495bSYour Name {
1940*5113495bSYour Name 	uint8_t tx_status = 0;
1941*5113495bSYour Name 	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
1942*5113495bSYour Name 
1943*5113495bSYour Name 	qdf_nbuf_t nbuf = tx_desc->nbuf;
1944*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1945*5113495bSYour Name 						     DP_MOD_ID_TDLS);
1946*5113495bSYour Name 
1947*5113495bSYour Name 	if (qdf_unlikely(!vdev)) {
1948*5113495bSYour Name 		dp_err_rl("vdev is null!");
1949*5113495bSYour Name 		goto error;
1950*5113495bSYour Name 	}
1951*5113495bSYour Name 
1952*5113495bSYour Name 	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
1953*5113495bSYour Name 	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
1954*5113495bSYour Name 	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
1955*5113495bSYour Name 
1956*5113495bSYour Name 	if (vdev->tx_non_std_data_callback.func) {
1957*5113495bSYour Name 		qdf_nbuf_set_next(nbuf, NULL);
1958*5113495bSYour Name 		vdev->tx_non_std_data_callback.func(
1959*5113495bSYour Name 				vdev->tx_non_std_data_callback.ctxt,
1960*5113495bSYour Name 				nbuf, tx_status);
1961*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1962*5113495bSYour Name 		return;
1963*5113495bSYour Name 	} else {
1964*5113495bSYour Name 		dp_err_rl("callback func is null");
1965*5113495bSYour Name 	}
1966*5113495bSYour Name 
1967*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
1968*5113495bSYour Name error:
1969*5113495bSYour Name 	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
1970*5113495bSYour Name 	qdf_nbuf_free(nbuf);
1971*5113495bSYour Name }
1972*5113495bSYour Name 
1973*5113495bSYour Name /**
1974*5113495bSYour Name  * dp_tx_msdu_single_map() - do nbuf map
1975*5113495bSYour Name  * @vdev: DP vdev handle
1976*5113495bSYour Name  * @tx_desc: DP TX descriptor pointer
1977*5113495bSYour Name  * @nbuf: skb pointer
1978*5113495bSYour Name  *
1979*5113495bSYour Name  * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
1980*5113495bSYour Name  * operation done in other component.
1981*5113495bSYour Name  *
1982*5113495bSYour Name  * Return: QDF_STATUS
1983*5113495bSYour Name  */
1984*5113495bSYour Name static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
1985*5113495bSYour Name 					       struct dp_tx_desc_s *tx_desc,
1986*5113495bSYour Name 					       qdf_nbuf_t nbuf)
1987*5113495bSYour Name {
1988*5113495bSYour Name 	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
1989*5113495bSYour Name 		return qdf_nbuf_map_nbytes_single(vdev->osdev,
1990*5113495bSYour Name 						  nbuf,
1991*5113495bSYour Name 						  QDF_DMA_TO_DEVICE,
1992*5113495bSYour Name 						  nbuf->len);
1993*5113495bSYour Name 	else
1994*5113495bSYour Name 		return qdf_nbuf_map_single(vdev->osdev, nbuf,
1995*5113495bSYour Name 					   QDF_DMA_TO_DEVICE);
1996*5113495bSYour Name }
1997*5113495bSYour Name #else
1998*5113495bSYour Name static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
1999*5113495bSYour Name 					   struct dp_vdev *vdev,
2000*5113495bSYour Name 					   struct dp_tx_desc_s *tx_desc)
2001*5113495bSYour Name {
2002*5113495bSYour Name }
2003*5113495bSYour Name 
2004*5113495bSYour Name static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
2005*5113495bSYour Name 						struct dp_tx_desc_s *tx_desc)
2006*5113495bSYour Name {
2007*5113495bSYour Name }
2008*5113495bSYour Name 
2009*5113495bSYour Name static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
2010*5113495bSYour Name 					       struct dp_tx_desc_s *tx_desc,
2011*5113495bSYour Name 					       qdf_nbuf_t nbuf)
2012*5113495bSYour Name {
2013*5113495bSYour Name 	return qdf_nbuf_map_nbytes_single(vdev->osdev,
2014*5113495bSYour Name 					  nbuf,
2015*5113495bSYour Name 					  QDF_DMA_TO_DEVICE,
2016*5113495bSYour Name 					  nbuf->len);
2017*5113495bSYour Name }
2018*5113495bSYour Name #endif
2019*5113495bSYour Name 
2020*5113495bSYour Name static inline
2021*5113495bSYour Name qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
2022*5113495bSYour Name 				      struct dp_tx_desc_s *tx_desc,
2023*5113495bSYour Name 				      qdf_nbuf_t nbuf)
2024*5113495bSYour Name {
2025*5113495bSYour Name 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2026*5113495bSYour Name 
2027*5113495bSYour Name 	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
2028*5113495bSYour Name 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
2029*5113495bSYour Name 		return 0;
2030*5113495bSYour Name 
2031*5113495bSYour Name 	return qdf_nbuf_mapped_paddr_get(nbuf);
2032*5113495bSYour Name }
2033*5113495bSYour Name 
2034*5113495bSYour Name static inline
2035*5113495bSYour Name void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2036*5113495bSYour Name {
2037*5113495bSYour Name 	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
2038*5113495bSYour Name 					   desc->nbuf,
2039*5113495bSYour Name 					   desc->dma_addr,
2040*5113495bSYour Name 					   QDF_DMA_TO_DEVICE,
2041*5113495bSYour Name 					   desc->length);
2042*5113495bSYour Name }
2043*5113495bSYour Name 
2044*5113495bSYour Name #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
2045*5113495bSYour Name static inline bool
2046*5113495bSYour Name is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2047*5113495bSYour Name {
2048*5113495bSYour Name 	struct net_device *ingress_dev;
2049*5113495bSYour Name 	skb_frag_t *frag;
2050*5113495bSYour Name 	uint16_t buf_len = 0;
2051*5113495bSYour Name 	uint16_t linear_data_len = 0;
2052*5113495bSYour Name 	uint8_t *payload_addr = NULL;
2053*5113495bSYour Name 
2054*5113495bSYour Name 	ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
2055*5113495bSYour Name 
2056*5113495bSYour Name 	if (!ingress_dev)
2057*5113495bSYour Name 		return false;
2058*5113495bSYour Name 
2059*5113495bSYour Name 	if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
2060*5113495bSYour Name 		qdf_net_if_release_dev((struct qdf_net_if *)ingress_dev);
2061*5113495bSYour Name 		frag = &(skb_shinfo(nbuf)->frags[0]);
2062*5113495bSYour Name 		buf_len = skb_frag_size(frag);
2063*5113495bSYour Name 		payload_addr = (uint8_t *)skb_frag_address(frag);
2064*5113495bSYour Name 		linear_data_len = skb_headlen(nbuf);
2065*5113495bSYour Name 
2066*5113495bSYour Name 		buf_len += linear_data_len;
2067*5113495bSYour Name 		payload_addr = payload_addr - linear_data_len;
2068*5113495bSYour Name 		memcpy(payload_addr, nbuf->data, linear_data_len);
2069*5113495bSYour Name 
2070*5113495bSYour Name 		msdu_info->frm_type = dp_tx_frm_rmnet;
2071*5113495bSYour Name 		msdu_info->buf_len = buf_len;
2072*5113495bSYour Name 		msdu_info->payload_addr = payload_addr;
2073*5113495bSYour Name 
2074*5113495bSYour Name 		return true;
2075*5113495bSYour Name 	}
2076*5113495bSYour Name 	qdf_net_if_release_dev((struct qdf_net_if *)ingress_dev);
2077*5113495bSYour Name 	return false;
2078*5113495bSYour Name }
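/*
 * Illustrative layout (made-up sizes): for an rmnet skb with 14 linear
 * header bytes and one 1486-byte frag, the linear bytes are copied into
 * the headroom immediately preceding the frag payload, yielding a single
 * contiguous 1500-byte buffer described by payload_addr/buf_len above.
 */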
2079*5113495bSYour Name 
2080*5113495bSYour Name static inline
2081*5113495bSYour Name qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2082*5113495bSYour Name 				    struct dp_tx_desc_s *tx_desc)
2083*5113495bSYour Name {
2084*5113495bSYour Name 	qdf_dma_addr_t paddr;
2085*5113495bSYour Name 
2086*5113495bSYour Name 	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
2087*5113495bSYour Name 	tx_desc->length  = msdu_info->buf_len;
2088*5113495bSYour Name 
2089*5113495bSYour Name 	qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
2090*5113495bSYour Name 				 (void *)(msdu_info->payload_addr +
2091*5113495bSYour Name 					  msdu_info->buf_len));
2092*5113495bSYour Name 
2093*5113495bSYour Name 	tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
2094*5113495bSYour Name 	return paddr;
2095*5113495bSYour Name }
2096*5113495bSYour Name #else
2097*5113495bSYour Name static inline bool
2098*5113495bSYour Name is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
2099*5113495bSYour Name {
2100*5113495bSYour Name 	return false;
2101*5113495bSYour Name }
2102*5113495bSYour Name 
2103*5113495bSYour Name static inline
2104*5113495bSYour Name qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
2105*5113495bSYour Name 				    struct dp_tx_desc_s *tx_desc)
2106*5113495bSYour Name {
2107*5113495bSYour Name 	return 0;
2108*5113495bSYour Name }
2109*5113495bSYour Name #endif
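
/*
 * Illustrative sketch (not part of the build): how the rmnet fast path
 * above is meant to be driven. is_nbuf_frm_rmnet() classifies the nbuf
 * and, on a match, has already collapsed the linear data in front of
 * the first frag, so dp_tx_rmnet_nbuf_map() reduces to a virt-to-phys
 * translation plus a cache clean. The wrapper below is a hypothetical
 * caller; the helpers are the ones defined above.
 */
#if 0
static qdf_dma_addr_t example_rmnet_aware_map(struct dp_vdev *vdev,
					      struct dp_tx_desc_s *tx_desc,
					      qdf_nbuf_t nbuf,
					      struct dp_tx_msdu_info_s *msdu_info)
{
	/* Fills msdu_info->frm_type/buf_len/payload_addr on success */
	if (is_nbuf_frm_rmnet(nbuf, msdu_info))
		return dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);

	/* Otherwise fall back to the generic nbuf mapping */
	return dp_tx_nbuf_map(vdev, tx_desc, nbuf);
}
#endif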
2110*5113495bSYour Name 
2111*5113495bSYour Name #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2112*5113495bSYour Name static inline
2113*5113495bSYour Name qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2114*5113495bSYour Name 			      struct dp_tx_desc_s *tx_desc,
2115*5113495bSYour Name 			      qdf_nbuf_t nbuf)
2116*5113495bSYour Name {
2117*5113495bSYour Name 	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_FAST)) {
2118*5113495bSYour Name 		qdf_nbuf_dma_clean_range((void *)nbuf->data,
2119*5113495bSYour Name 					 (void *)(nbuf->data + nbuf->len));
2120*5113495bSYour Name 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2121*5113495bSYour Name 	} else {
2122*5113495bSYour Name 		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2123*5113495bSYour Name 	}
2124*5113495bSYour Name }
2125*5113495bSYour Name 
2126*5113495bSYour Name static inline
2127*5113495bSYour Name void dp_tx_nbuf_unmap(struct dp_soc *soc,
2128*5113495bSYour Name 		      struct dp_tx_desc_s *desc)
2129*5113495bSYour Name {
2130*5113495bSYour Name 	if (qdf_unlikely(!(desc->flags &
2131*5113495bSYour Name 			   (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
2132*5113495bSYour Name 		return dp_tx_nbuf_unmap_regular(soc, desc);
2133*5113495bSYour Name }
2134*5113495bSYour Name #else
2135*5113495bSYour Name static inline
2136*5113495bSYour Name qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
2137*5113495bSYour Name 			      struct dp_tx_desc_s *tx_desc,
2138*5113495bSYour Name 			      qdf_nbuf_t nbuf)
2139*5113495bSYour Name {
2140*5113495bSYour Name 	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
2141*5113495bSYour Name }
2142*5113495bSYour Name 
2143*5113495bSYour Name static inline
2144*5113495bSYour Name void dp_tx_nbuf_unmap(struct dp_soc *soc,
2145*5113495bSYour Name 		      struct dp_tx_desc_s *desc)
2146*5113495bSYour Name {
2147*5113495bSYour Name 	return dp_tx_nbuf_unmap_regular(soc, desc);
2148*5113495bSYour Name }
2149*5113495bSYour Name #endif
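
/*
 * Illustrative sketch (not part of the build): the map/unmap pair above
 * is deliberately asymmetric. When QCA_DP_TX_NBUF_NO_MAP_UNMAP is set,
 * a FAST descriptor is mapped with only a cache clean and a virt-to-phys
 * translation, so the completion side must skip the DMA unmap as well;
 * dp_tx_nbuf_unmap() encodes that by bailing out for descriptors
 * carrying the SIMPLE or RMNET flag. A hypothetical completion path:
 */
#if 0
static void example_comp_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	/* No-op for SIMPLE/RMNET descriptors, real DMA unmap otherwise */
	dp_tx_nbuf_unmap(soc, desc);
}
#endif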
2150*5113495bSYour Name 
2151*5113495bSYour Name #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
2152*5113495bSYour Name static inline
2153*5113495bSYour Name void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2154*5113495bSYour Name {
2155*5113495bSYour Name 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE))) {
2156*5113495bSYour Name 		dp_tx_nbuf_unmap(soc, desc);
2157*5113495bSYour Name 		desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
2158*5113495bSYour Name 	}
2159*5113495bSYour Name }
2160*5113495bSYour Name 
2161*5113495bSYour Name static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2162*5113495bSYour Name {
2163*5113495bSYour Name 	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
2164*5113495bSYour Name 		dp_tx_nbuf_unmap(soc, desc);
2165*5113495bSYour Name }
2166*5113495bSYour Name #else
2167*5113495bSYour Name static inline
2168*5113495bSYour Name void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2169*5113495bSYour Name {
2170*5113495bSYour Name }
2171*5113495bSYour Name 
2172*5113495bSYour Name static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
2173*5113495bSYour Name {
2174*5113495bSYour Name 	dp_tx_nbuf_unmap(soc, desc);
2175*5113495bSYour Name }
2176*5113495bSYour Name #endif
2177*5113495bSYour Name 
2178*5113495bSYour Name #ifdef MESH_MODE_SUPPORT
2179*5113495bSYour Name /**
2180*5113495bSYour Name  * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
2181*5113495bSYour Name  * @soc: datapath SOC
2182*5113495bSYour Name  * @vdev: datapath vdev
2183*5113495bSYour Name  * @tx_desc: TX descriptor
2184*5113495bSYour Name  *
2185*5113495bSYour Name  * Return: None
2186*5113495bSYour Name  */
2187*5113495bSYour Name static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2188*5113495bSYour Name 					   struct dp_vdev *vdev,
2189*5113495bSYour Name 					   struct dp_tx_desc_s *tx_desc)
2190*5113495bSYour Name {
2191*5113495bSYour Name 	if (qdf_unlikely(vdev->mesh_vdev))
2192*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
2193*5113495bSYour Name }
2194*5113495bSYour Name 
2195*5113495bSYour Name /**
2196*5113495bSYour Name  * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
2197*5113495bSYour Name  * @soc: dp_soc handle
2198*5113495bSYour Name  * @tx_desc: TX descriptor
2199*5113495bSYour Name  * @delayed_free: delay the nbuf free
2200*5113495bSYour Name  *
2201*5113495bSYour Name  * Return: nbuf to be freed later when delayed_free is set, NULL otherwise
2202*5113495bSYour Name  */
2203*5113495bSYour Name static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2204*5113495bSYour Name 						   struct dp_tx_desc_s *tx_desc,
2205*5113495bSYour Name 						   bool delayed_free)
2206*5113495bSYour Name {
2207*5113495bSYour Name 	qdf_nbuf_t nbuf = tx_desc->nbuf;
2208*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
2209*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
2210*5113495bSYour Name 
2211*5113495bSYour Name 	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
2212*5113495bSYour Name 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
2213*5113495bSYour Name 		if (vdev)
2214*5113495bSYour Name 			DP_STATS_INC(vdev,
2215*5113495bSYour Name 				     tx_i[xmit_type].mesh.completion_fw, 1);
2216*5113495bSYour Name 
2217*5113495bSYour Name 		if (delayed_free)
2218*5113495bSYour Name 			return nbuf;
2219*5113495bSYour Name 
2220*5113495bSYour Name 		qdf_nbuf_free(nbuf);
2221*5113495bSYour Name 	} else {
2222*5113495bSYour Name 		if (vdev && vdev->osif_tx_free_ext) {
2223*5113495bSYour Name 			vdev->osif_tx_free_ext((nbuf));
2224*5113495bSYour Name 		} else {
2225*5113495bSYour Name 			if (delayed_free)
2226*5113495bSYour Name 				return nbuf;
2227*5113495bSYour Name 
2228*5113495bSYour Name 			qdf_nbuf_free(nbuf);
2229*5113495bSYour Name 		}
2230*5113495bSYour Name 	}
2231*5113495bSYour Name 
2232*5113495bSYour Name 	if (vdev)
2233*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
2234*5113495bSYour Name 
2235*5113495bSYour Name 	return NULL;
2236*5113495bSYour Name }
2237*5113495bSYour Name #else
2238*5113495bSYour Name static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
2239*5113495bSYour Name 					   struct dp_vdev *vdev,
2240*5113495bSYour Name 					   struct dp_tx_desc_s *tx_desc)
2241*5113495bSYour Name {
2242*5113495bSYour Name }
2243*5113495bSYour Name 
2244*5113495bSYour Name static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
2245*5113495bSYour Name 						   struct dp_tx_desc_s *tx_desc,
2246*5113495bSYour Name 						   bool delayed_free)
2247*5113495bSYour Name {
2248*5113495bSYour Name 	return NULL;
2249*5113495bSYour Name }
2250*5113495bSYour Name #endif
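
/*
 * Illustrative sketch (not part of the build): dp_mesh_tx_comp_free_buff()
 * has two contracts selected by @delayed_free. With delayed_free = false
 * the nbuf is consumed internally and NULL is returned; with
 * delayed_free = true the nbuf is handed back so the caller can batch the
 * free outside the completion context. A hypothetical caller:
 */
#if 0
static void example_mesh_comp(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t nbuf;

	nbuf = dp_mesh_tx_comp_free_buff(soc, desc, true);
	if (nbuf)
		qdf_nbuf_free(nbuf);	/* deferred free point */
}
#endif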
2251*5113495bSYour Name 
2252*5113495bSYour Name int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
2253*5113495bSYour Name {
2254*5113495bSYour Name 	struct dp_pdev *pdev = NULL;
2255*5113495bSYour Name 	struct dp_ast_entry *src_ast_entry = NULL;
2256*5113495bSYour Name 	struct dp_ast_entry *dst_ast_entry = NULL;
2257*5113495bSYour Name 	struct dp_soc *soc = NULL;
2258*5113495bSYour Name 
2259*5113495bSYour Name 	qdf_assert(vdev);
2260*5113495bSYour Name 	pdev = vdev->pdev;
2261*5113495bSYour Name 	qdf_assert(pdev);
2262*5113495bSYour Name 	soc = pdev->soc;
2263*5113495bSYour Name 
2264*5113495bSYour Name 	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
2265*5113495bSYour Name 				(soc, dstmac, vdev->pdev->pdev_id);
2266*5113495bSYour Name 
2267*5113495bSYour Name 	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
2268*5113495bSYour Name 				(soc, srcmac, vdev->pdev->pdev_id);
2269*5113495bSYour Name 	if (dst_ast_entry && src_ast_entry) {
2270*5113495bSYour Name 		if (dst_ast_entry->peer_id ==
2271*5113495bSYour Name 				src_ast_entry->peer_id)
2272*5113495bSYour Name 			return 1;
2273*5113495bSYour Name 	}
2274*5113495bSYour Name 
2275*5113495bSYour Name 	return 0;
2276*5113495bSYour Name }
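
/*
 * Illustrative sketch (not part of the build): dp_tx_frame_is_drop()
 * returns 1 when both MACs resolve, via the per-pdev AST hash, to the
 * same peer id, i.e. a frame that would hairpin straight back to its
 * sender. A hypothetical ingress check on an Ethernet frame (dst MAC
 * first, src MAC at offset QDF_MAC_ADDR_SIZE):
 */
#if 0
static bool example_is_hairpin(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	uint8_t *eh = qdf_nbuf_data(nbuf);

	return dp_tx_frame_is_drop(vdev, eh + QDF_MAC_ADDR_SIZE, eh) != 0;
}
#endif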
2277*5113495bSYour Name 
2278*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
2279*5113495bSYour Name 	defined(WLAN_MCAST_MLO)
2280*5113495bSYour Name /* MLO peer id for reinject*/
2281*5113495bSYour Name /* MLO peer id for reinject */
2282*5113495bSYour Name /* MLO vdev id inc offset */
2283*5113495bSYour Name /* MLO vdev id increment offset */
2284*5113495bSYour Name 
2285*5113495bSYour Name #ifdef QCA_SUPPORT_WDS_EXTENDED
2286*5113495bSYour Name static inline bool
2287*5113495bSYour Name dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2288*5113495bSYour Name {
2289*5113495bSYour Name 	if (tx_exc_metadata && tx_exc_metadata->is_wds_extended)
2290*5113495bSYour Name 		return true;
2291*5113495bSYour Name 
2292*5113495bSYour Name 	return false;
2293*5113495bSYour Name }
2294*5113495bSYour Name #else
2295*5113495bSYour Name static inline bool
2296*5113495bSYour Name dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
2297*5113495bSYour Name {
2298*5113495bSYour Name 	return false;
2299*5113495bSYour Name }
2300*5113495bSYour Name #endif
2301*5113495bSYour Name 
2302*5113495bSYour Name static inline void
2303*5113495bSYour Name dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2304*5113495bSYour Name 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2305*5113495bSYour Name {
2306*5113495bSYour Name 	/* wds ext enabled will not set the TO_FW bit */
2307*5113495bSYour Name 	/* when wds ext is enabled, do not set the TO_FW bit */
2308*5113495bSYour Name 		return;
2309*5113495bSYour Name 
2310*5113495bSYour Name 	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
2311*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
2312*5113495bSYour Name 		qdf_atomic_inc(&soc->num_tx_exception);
2313*5113495bSYour Name 	}
2314*5113495bSYour Name }
2315*5113495bSYour Name 
2316*5113495bSYour Name static inline void
2317*5113495bSYour Name dp_tx_update_mcast_param(uint16_t peer_id,
2318*5113495bSYour Name 			 uint16_t *htt_tcl_metadata,
2319*5113495bSYour Name 			 struct dp_vdev *vdev,
2320*5113495bSYour Name 			 struct dp_tx_msdu_info_s *msdu_info)
2321*5113495bSYour Name {
2322*5113495bSYour Name 	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
2323*5113495bSYour Name 		*htt_tcl_metadata = 0;
2324*5113495bSYour Name 		DP_TX_TCL_METADATA_TYPE_SET(
2325*5113495bSYour Name 				*htt_tcl_metadata,
2326*5113495bSYour Name 				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
2327*5113495bSYour Name 		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
2328*5113495bSYour Name 						    msdu_info->gsn);
2329*5113495bSYour Name 
2330*5113495bSYour Name 		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
2331*5113495bSYour Name 		HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
2332*5113495bSYour Name 							*htt_tcl_metadata, 1);
2333*5113495bSYour Name 	} else {
2334*5113495bSYour Name 		msdu_info->vdev_id = vdev->vdev_id;
2335*5113495bSYour Name 	}
2336*5113495bSYour Name }
2337*5113495bSYour Name #else
2338*5113495bSYour Name static inline void
2339*5113495bSYour Name dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
2340*5113495bSYour Name 			 struct cdp_tx_exception_metadata *tx_exc_metadata)
2341*5113495bSYour Name {
2342*5113495bSYour Name }
2343*5113495bSYour Name 
2344*5113495bSYour Name static inline void
2345*5113495bSYour Name dp_tx_update_mcast_param(uint16_t peer_id,
2346*5113495bSYour Name 			 uint16_t *htt_tcl_metadata,
2347*5113495bSYour Name 			 struct dp_vdev *vdev,
2348*5113495bSYour Name 			 struct dp_tx_msdu_info_s *msdu_info)
2349*5113495bSYour Name {
2350*5113495bSYour Name }
2351*5113495bSYour Name #endif
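
/*
 * Illustrative sketch (not part of the build): for an MLO multicast
 * reinject (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) the TCL metadata
 * is rebuilt as a global-sequence-based entry and the vdev id is shifted
 * into the partner range by DP_MLO_VDEV_ID_OFFSET. The gsn value below
 * is an example number.
 */
#if 0
static void example_mlo_reinject_meta(struct dp_vdev *vdev,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	uint16_t htt_tcl_metadata = 0;

	msdu_info->gsn = 42;	/* global sequence number, example only */
	dp_tx_update_mcast_param(DP_MLO_MCAST_REINJECT_PEER_ID,
				 &htt_tcl_metadata, vdev, msdu_info);
	/*
	 * htt_tcl_metadata now carries the GLOBAL_SEQ type and the gsn;
	 * msdu_info->vdev_id == vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET
	 */
}
#endif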
2352*5113495bSYour Name 
2353*5113495bSYour Name #ifdef DP_TX_SW_DROP_STATS_INC
2354*5113495bSYour Name static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2355*5113495bSYour Name 				 qdf_nbuf_t nbuf,
2356*5113495bSYour Name 				 enum cdp_tx_sw_drop drop_code)
2357*5113495bSYour Name {
2358*5113495bSYour Name 	/* EAPOL Drop stats */
2359*5113495bSYour Name 	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
2360*5113495bSYour Name 		switch (drop_code) {
2361*5113495bSYour Name 		case TX_DESC_ERR:
2362*5113495bSYour Name 			DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
2363*5113495bSYour Name 			break;
2364*5113495bSYour Name 		case TX_HAL_RING_ACCESS_ERR:
2365*5113495bSYour Name 			DP_STATS_INC(pdev,
2366*5113495bSYour Name 				     eap_drop_stats.tx_hal_ring_access_err, 1);
2367*5113495bSYour Name 			break;
2368*5113495bSYour Name 		case TX_DMA_MAP_ERR:
2369*5113495bSYour Name 			DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
2370*5113495bSYour Name 			break;
2371*5113495bSYour Name 		case TX_HW_ENQUEUE:
2372*5113495bSYour Name 			DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
2373*5113495bSYour Name 			break;
2374*5113495bSYour Name 		case TX_SW_ENQUEUE:
2375*5113495bSYour Name 			DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
2376*5113495bSYour Name 			break;
2377*5113495bSYour Name 		default:
2378*5113495bSYour Name 			dp_info_rl("Invalid eapol_drop code: %d", drop_code);
2379*5113495bSYour Name 			break;
2380*5113495bSYour Name 		}
2381*5113495bSYour Name 	}
2382*5113495bSYour Name }
2383*5113495bSYour Name #else
2384*5113495bSYour Name static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
2385*5113495bSYour Name 				 qdf_nbuf_t nbuf,
2386*5113495bSYour Name 				 enum cdp_tx_sw_drop drop_code)
2387*5113495bSYour Name {
2388*5113495bSYour Name }
2389*5113495bSYour Name #endif
2390*5113495bSYour Name 
2391*5113495bSYour Name #ifdef WLAN_FEATURE_TX_LATENCY_STATS
2392*5113495bSYour Name /**
2393*5113495bSYour Name  * dp_tx_latency_stats_enabled() - check enablement of transmit latency
2394*5113495bSYour Name  * statistics
2395*5113495bSYour Name  * @vdev: DP vdev handle
2396*5113495bSYour Name  *
2397*5113495bSYour Name  * Return: true if transmit latency statistics is enabled, false otherwise.
2398*5113495bSYour Name  * Return: true if transmit latency statistics are enabled, false otherwise.
2399*5113495bSYour Name static inline bool dp_tx_latency_stats_enabled(struct dp_vdev *vdev)
2400*5113495bSYour Name {
2401*5113495bSYour Name 	return qdf_atomic_read(&vdev->tx_latency_cfg.enabled);
2402*5113495bSYour Name }
2403*5113495bSYour Name 
2404*5113495bSYour Name /**
2405*5113495bSYour Name  * dp_tx_latency_stats_report_enabled() - check enablement of async report
2406*5113495bSYour Name  * for transmit latency statistics
2407*5113495bSYour Name  * @vdev: DP vdev handle
2408*5113495bSYour Name  *
2409*5113495bSYour Name  * Return: true if transmit latency statistics is enabled, false otherwise.
2410*5113495bSYour Name  * Return: true if async report of transmit latency statistics is enabled,
2410*5113495bSYour Name  * false otherwise.
2411*5113495bSYour Name static inline bool dp_tx_latency_stats_report_enabled(struct dp_vdev *vdev)
2412*5113495bSYour Name {
2413*5113495bSYour Name 	return qdf_atomic_read(&vdev->tx_latency_cfg.report);
2414*5113495bSYour Name }
2415*5113495bSYour Name 
2416*5113495bSYour Name /**
2417*5113495bSYour Name  * dp_tx_get_driver_ingress_ts() - get driver ingress timestamp from nbuf
2418*5113495bSYour Name  * @vdev: DP vdev handle
2419*5113495bSYour Name  * @msdu_info: pointer to MSDU Descriptor
2420*5113495bSYour Name  * @nbuf: original buffer from network stack
2421*5113495bSYour Name  *
2422*5113495bSYour Name  * Return: None
2423*5113495bSYour Name  */
2424*5113495bSYour Name static inline void
2425*5113495bSYour Name dp_tx_get_driver_ingress_ts(struct dp_vdev *vdev,
2426*5113495bSYour Name 			    struct dp_tx_msdu_info_s *msdu_info,
2427*5113495bSYour Name 			    qdf_nbuf_t nbuf)
2428*5113495bSYour Name {
2429*5113495bSYour Name 	if (!dp_tx_latency_stats_enabled(vdev))
2430*5113495bSYour Name 		return;
2431*5113495bSYour Name 
2432*5113495bSYour Name 	msdu_info->driver_ingress_ts = qdf_nbuf_get_tx_ts(nbuf, true);
2433*5113495bSYour Name }
2434*5113495bSYour Name 
2435*5113495bSYour Name /**
2436*5113495bSYour Name  * dp_tx_update_ts_on_enqueued() - set driver ingress/egress timestamp in
2437*5113495bSYour Name  * tx descriptor
2438*5113495bSYour Name  * @vdev: DP vdev handle
2439*5113495bSYour Name  * @msdu_info: pointer to MSDU Descriptor
2440*5113495bSYour Name  * @tx_desc: pointer to tx descriptor
2441*5113495bSYour Name  *
2442*5113495bSYour Name  * Return: None
2443*5113495bSYour Name  */
2444*5113495bSYour Name static inline void
2445*5113495bSYour Name dp_tx_update_ts_on_enqueued(struct dp_vdev *vdev,
2446*5113495bSYour Name 			    struct dp_tx_msdu_info_s *msdu_info,
2447*5113495bSYour Name 			    struct dp_tx_desc_s *tx_desc)
2448*5113495bSYour Name {
2449*5113495bSYour Name 	if (!dp_tx_latency_stats_enabled(vdev))
2450*5113495bSYour Name 		return;
2451*5113495bSYour Name 
2452*5113495bSYour Name 	tx_desc->driver_ingress_ts = msdu_info->driver_ingress_ts;
2453*5113495bSYour Name 	tx_desc->driver_egress_ts = qdf_ktime_real_get();
2454*5113495bSYour Name }
2455*5113495bSYour Name 
2456*5113495bSYour Name /**
2457*5113495bSYour Name  * dp_tx_latency_stats_update_bucket() - update transmit latency statistics
2458*5113495bSYour Name  * for specified type
2459*5113495bSYour Name  * @vdev: DP vdev handle
2460*5113495bSYour Name  * @tx_latency: pointer to transmit latency stats
2461*5113495bSYour Name  * @idx: index of the statistics
2462*5113495bSYour Name  * @type: transmit latency type
2463*5113495bSYour Name  * @value: latency to be recorded
2464*5113495bSYour Name  *
2465*5113495bSYour Name  * Return: None
2466*5113495bSYour Name  */
2467*5113495bSYour Name static inline void
2468*5113495bSYour Name dp_tx_latency_stats_update_bucket(struct dp_vdev *vdev,
2469*5113495bSYour Name 				  struct dp_tx_latency *tx_latency,
2470*5113495bSYour Name 				  int idx, enum cdp_tx_latency_type type,
2471*5113495bSYour Name 				  uint32_t value)
2472*5113495bSYour Name {
2473*5113495bSYour Name 	int32_t granularity;
2474*5113495bSYour Name 	int lvl;
2475*5113495bSYour Name 
2476*5113495bSYour Name 	granularity =
2477*5113495bSYour Name 		qdf_atomic_read(&vdev->tx_latency_cfg.granularity[type]);
2478*5113495bSYour Name 	if (qdf_unlikely(!granularity))
2479*5113495bSYour Name 		return;
2480*5113495bSYour Name 
2481*5113495bSYour Name 	lvl = value / granularity;
2482*5113495bSYour Name 	if (lvl >= CDP_TX_LATENCY_DISTR_LV_MAX)
2483*5113495bSYour Name 		lvl = CDP_TX_LATENCY_DISTR_LV_MAX - 1;
2484*5113495bSYour Name 
2485*5113495bSYour Name 	qdf_atomic_inc(&tx_latency->stats[idx][type].msdus_accum);
2486*5113495bSYour Name 	qdf_atomic_add(value, &tx_latency->stats[idx][type].latency_accum);
2487*5113495bSYour Name 	qdf_atomic_inc(&tx_latency->stats[idx][type].distribution[lvl]);
2488*5113495bSYour Name }
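
/*
 * Illustrative sketch (not part of the build): bucket selection above is
 * simply value / granularity, clamped to the last distribution level.
 * E.g. with a 500 us granularity, a 1.2 ms latency lands in level 2, and
 * any value past the final level is accounted in the last bucket so the
 * histogram never overflows.
 */
#if 0
static int example_latency_level(uint32_t value_us, int32_t granularity_us)
{
	int lvl = value_us / granularity_us;

	return (lvl >= CDP_TX_LATENCY_DISTR_LV_MAX) ?
			CDP_TX_LATENCY_DISTR_LV_MAX - 1 : lvl;
}
#endif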
2489*5113495bSYour Name 
2490*5113495bSYour Name /**
2491*5113495bSYour Name  * dp_tx_latency_stats_update() - update transmit latency statistics on
2492*5113495bSYour Name  * msdu transmit completed
2493*5113495bSYour Name  * @soc: dp soc handle
2494*5113495bSYour Name  * @txrx_peer: txrx peer handle
2495*5113495bSYour Name  * @tx_desc: pointer to tx descriptor
2496*5113495bSYour Name  * @ts: tx completion status
2497*5113495bSYour Name  * @link_id: link id
2498*5113495bSYour Name  *
2499*5113495bSYour Name  * Return: None
2500*5113495bSYour Name  */
2501*5113495bSYour Name static inline void
2502*5113495bSYour Name dp_tx_latency_stats_update(struct dp_soc *soc,
2503*5113495bSYour Name 			   struct dp_txrx_peer *txrx_peer,
2504*5113495bSYour Name 			   struct dp_tx_desc_s *tx_desc,
2505*5113495bSYour Name 			   struct hal_tx_completion_status *ts,
2506*5113495bSYour Name 			   uint8_t link_id)
2507*5113495bSYour Name {
2508*5113495bSYour Name 	uint32_t driver_latency, ring_buf_latency, hw_latency;
2509*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_E_INVAL;
2510*5113495bSYour Name 	int64_t current_ts, ingress, egress;
2511*5113495bSYour Name 	struct dp_vdev *vdev = txrx_peer->vdev;
2512*5113495bSYour Name 	struct dp_tx_latency *tx_latency;
2513*5113495bSYour Name 	uint8_t idx;
2514*5113495bSYour Name 
2515*5113495bSYour Name 	if (!dp_tx_latency_stats_enabled(vdev))
2516*5113495bSYour Name 		return;
2517*5113495bSYour Name 
2518*5113495bSYour Name 	if (!tx_desc->driver_ingress_ts || !tx_desc->driver_egress_ts)
2519*5113495bSYour Name 		return;
2520*5113495bSYour Name 
2521*5113495bSYour Name 	status = dp_tx_compute_hw_delay_us(ts, vdev->delta_tsf, &hw_latency);
2522*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
2523*5113495bSYour Name 		return;
2524*5113495bSYour Name 
2525*5113495bSYour Name 	ingress = qdf_ktime_to_us(tx_desc->driver_ingress_ts);
2526*5113495bSYour Name 	egress = qdf_ktime_to_us(tx_desc->driver_egress_ts);
2527*5113495bSYour Name 	driver_latency = (uint32_t)(egress - ingress);
2528*5113495bSYour Name 
2529*5113495bSYour Name 	current_ts = qdf_ktime_to_us(qdf_ktime_real_get());
2530*5113495bSYour Name 	ring_buf_latency = (uint32_t)(current_ts - egress);
2531*5113495bSYour Name 
2532*5113495bSYour Name 	tx_latency = &txrx_peer->stats[link_id].tx_latency;
2533*5113495bSYour Name 	idx = tx_latency->cur_idx;
2534*5113495bSYour Name 	dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
2535*5113495bSYour Name 					  CDP_TX_LATENCY_TYPE_DRIVER,
2536*5113495bSYour Name 					  driver_latency);
2537*5113495bSYour Name 	dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
2538*5113495bSYour Name 					  CDP_TX_LATENCY_TYPE_RING_BUF,
2539*5113495bSYour Name 					  ring_buf_latency);
2540*5113495bSYour Name 	dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx,
2541*5113495bSYour Name 					  CDP_TX_LATENCY_TYPE_HW, hw_latency);
2542*5113495bSYour Name }
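
/*
 * Timeline of the decomposition performed above (informative only):
 *
 *   driver_ingress_ts    driver_egress_ts         completion
 *          |----- driver -----|------ ring_buf ------|
 *                                                    + hw (from the TSF
 *                                                      delta via
 *                                                      dp_tx_compute_hw_delay_us())
 *
 * driver   = time spent inside the host driver before HW enqueue
 * ring_buf = time the frame waited on the ring until completion
 * hw       = firmware/over-the-air time derived from the TSF delta
 */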
2543*5113495bSYour Name 
2544*5113495bSYour Name /**
2545*5113495bSYour Name  * dp_tx_latency_stats_clear_bucket() - clear specified transmit latency
2546*5113495bSYour Name  * statistics for specified type
2547*5113495bSYour Name  * @tx_latency: pointer to transmit latency stats
2548*5113495bSYour Name  * @idx: index of the statistics
2549*5113495bSYour Name  * @type: transmit latency type
2550*5113495bSYour Name  *
2551*5113495bSYour Name  * Return: None
2552*5113495bSYour Name  */
2553*5113495bSYour Name static inline void
2554*5113495bSYour Name dp_tx_latency_stats_clear_bucket(struct dp_tx_latency *tx_latency,
2555*5113495bSYour Name 				 int idx, enum cdp_tx_latency_type type)
2556*5113495bSYour Name {
2557*5113495bSYour Name 	int lvl;
2558*5113495bSYour Name 	struct dp_tx_latency_stats *stats;
2559*5113495bSYour Name 
2560*5113495bSYour Name 	stats = &tx_latency->stats[idx][type];
2561*5113495bSYour Name 	qdf_atomic_init(&stats->msdus_accum);
2562*5113495bSYour Name 	qdf_atomic_init(&stats->latency_accum);
2563*5113495bSYour Name 	for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++)
2564*5113495bSYour Name 		qdf_atomic_init(&stats->distribution[lvl]);
2565*5113495bSYour Name }
2566*5113495bSYour Name 
2567*5113495bSYour Name /**
2568*5113495bSYour Name  * dp_tx_latency_stats_clear_buckets() - clear specified transmit latency
2569*5113495bSYour Name  * statistics
2570*5113495bSYour Name  * @tx_latency: pointer to transmit latency stats
2571*5113495bSYour Name  * @idx: index of the statistics
2572*5113495bSYour Name  *
2573*5113495bSYour Name  * Return: None
2574*5113495bSYour Name  */
2575*5113495bSYour Name static void
2576*5113495bSYour Name dp_tx_latency_stats_clear_buckets(struct dp_tx_latency *tx_latency,
2577*5113495bSYour Name 				  int idx)
2578*5113495bSYour Name {
2579*5113495bSYour Name 	int type;
2580*5113495bSYour Name 
2581*5113495bSYour Name 	for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++)
2582*5113495bSYour Name 		dp_tx_latency_stats_clear_bucket(tx_latency, idx, type);
2583*5113495bSYour Name }
2584*5113495bSYour Name 
2585*5113495bSYour Name /**
2586*5113495bSYour Name  * dp_tx_latency_stats_update_cca() - update transmit latency statistics for
2587*5113495bSYour Name  * CCA
2588*5113495bSYour Name  * @soc: dp soc handle
2589*5113495bSYour Name  * @peer_id: peer id
2590*5113495bSYour Name  * @granularity: granularity of distribution
2591*5113495bSYour Name  * @distribution: distribution of transmit latency statistics
2592*5113495bSYour Name  * @avg: average of CCA latency(in microseconds) within a cycle
2593*5113495bSYour Name  * @avg: average of CCA latency (in microseconds) within a cycle
2594*5113495bSYour Name  * Return: None
2595*5113495bSYour Name  */
2596*5113495bSYour Name void
2597*5113495bSYour Name dp_tx_latency_stats_update_cca(struct dp_soc *soc, uint16_t peer_id,
2598*5113495bSYour Name 			       uint32_t granularity, uint32_t *distribution,
2599*5113495bSYour Name 			       uint32_t avg)
2600*5113495bSYour Name {
2601*5113495bSYour Name 	int lvl, idx;
2602*5113495bSYour Name 	uint8_t link_id;
2603*5113495bSYour Name 	struct dp_tx_latency *tx_latency;
2604*5113495bSYour Name 	struct dp_tx_latency_stats *stats;
2605*5113495bSYour Name 	int32_t cur_granularity;
2606*5113495bSYour Name 	struct dp_vdev *vdev;
2607*5113495bSYour Name 	struct dp_tx_latency_config *cfg;
2608*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
2609*5113495bSYour Name 	struct dp_peer *peer;
2610*5113495bSYour Name 
2611*5113495bSYour Name 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
2612*5113495bSYour Name 	if (!peer) {
2613*5113495bSYour Name 		dp_err_rl("Peer not found peer id %d", peer_id);
2614*5113495bSYour Name 		return;
2615*5113495bSYour Name 	}
2616*5113495bSYour Name 
2617*5113495bSYour Name 	if (IS_MLO_DP_MLD_PEER(peer))
2618*5113495bSYour Name 		goto out;
2619*5113495bSYour Name 
2620*5113495bSYour Name 	vdev = peer->vdev;
2621*5113495bSYour Name 	if (!dp_tx_latency_stats_enabled(vdev))
2622*5113495bSYour Name 		goto out;
2623*5113495bSYour Name 
2624*5113495bSYour Name 	cfg = &vdev->tx_latency_cfg;
2625*5113495bSYour Name 	cur_granularity =
2626*5113495bSYour Name 		qdf_atomic_read(&cfg->granularity[CDP_TX_LATENCY_TYPE_CCA]);
2627*5113495bSYour Name 
2628*5113495bSYour Name 	/* in unit of ms */
2629*5113495bSYour Name 	/* in units of ms */
2630*5113495bSYour Name 	if (cur_granularity != granularity) {
2631*5113495bSYour Name 		dp_info_rl("invalid granularity, cur %d report %d",
2632*5113495bSYour Name 			   cur_granularity, granularity);
2633*5113495bSYour Name 		goto out;
2634*5113495bSYour Name 	}
2635*5113495bSYour Name 
2636*5113495bSYour Name 	txrx_peer = dp_get_txrx_peer(peer);
2637*5113495bSYour Name 	if (qdf_unlikely(!txrx_peer)) {
2638*5113495bSYour Name 		dp_err_rl("txrx_peer NULL for MAC: " QDF_MAC_ADDR_FMT,
2639*5113495bSYour Name 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2640*5113495bSYour Name 		goto out;
2641*5113495bSYour Name 	}
2642*5113495bSYour Name 
2643*5113495bSYour Name 	link_id = dp_get_peer_link_id(peer);
2644*5113495bSYour Name 	if (link_id >= txrx_peer->stats_arr_size)
2645*5113495bSYour Name 		goto out;
2646*5113495bSYour Name 
2647*5113495bSYour Name 	tx_latency = &txrx_peer->stats[link_id].tx_latency;
2648*5113495bSYour Name 	idx = tx_latency->cur_idx;
2649*5113495bSYour Name 	stats = &tx_latency->stats[idx][CDP_TX_LATENCY_TYPE_CCA];
2650*5113495bSYour Name 	qdf_atomic_set(&stats->latency_accum, avg);
2651*5113495bSYour Name 	qdf_atomic_set(&stats->msdus_accum, (avg ? 1 : 0));
2652*5113495bSYour Name 	for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++)
2653*5113495bSYour Name 		qdf_atomic_set(&stats->distribution[lvl],
2654*5113495bSYour Name 			       distribution[lvl]);
2655*5113495bSYour Name 
2656*5113495bSYour Name 	/* prepare for the next cycle */
2657*5113495bSYour Name 	tx_latency->cur_idx = 1 - idx;
2658*5113495bSYour Name 	dp_tx_latency_stats_clear_buckets(tx_latency, tx_latency->cur_idx);
2659*5113495bSYour Name 
2660*5113495bSYour Name out:
2661*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2662*5113495bSYour Name }
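
/*
 * Illustrative note (not part of the build): the per-peer stats are
 * double-buffered. Writers accumulate into stats[cur_idx][*] while
 * readers consume the closed buffer stats[1 - cur_idx][*]; the CCA
 * report above flips cur_idx and zeroes the new write buffer to start
 * the next cycle.
 */
#if 0
	/* reader side, mirroring dp_tx_latency_stats_get_per_peer() */
	uint8_t last_idx = 1 - tx_latency->cur_idx;

	stats = &tx_latency->stats[last_idx][type];
#endif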
2663*5113495bSYour Name 
2664*5113495bSYour Name /**
2665*5113495bSYour Name  * dp_tx_latency_stats_get_per_peer() - get transmit latency statistics for a
2666*5113495bSYour Name  * peer
2667*5113495bSYour Name  * @soc: dp soc handle
2668*5113495bSYour Name  * @peer: dp peer Handle
2669*5113495bSYour Name  * @latency: buffer to hold transmit latency statistics
2670*5113495bSYour Name  *
2671*5113495bSYour Name  * Return: QDF_STATUS
2672*5113495bSYour Name  */
2673*5113495bSYour Name static QDF_STATUS
2674*5113495bSYour Name dp_tx_latency_stats_get_per_peer(struct dp_soc *soc, struct dp_peer *peer,
2675*5113495bSYour Name 				 struct cdp_tx_latency *latency)
2676*5113495bSYour Name {
2677*5113495bSYour Name 	int lvl, type, link_id;
2678*5113495bSYour Name 	int32_t latency_accum, msdus_accum;
2679*5113495bSYour Name 	struct dp_vdev *vdev;
2680*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
2681*5113495bSYour Name 	struct dp_tx_latency *tx_latency;
2682*5113495bSYour Name 	struct dp_tx_latency_config *cfg;
2683*5113495bSYour Name 	struct dp_tx_latency_stats *stats;
2684*5113495bSYour Name 	uint8_t last_idx;
2685*5113495bSYour Name 
2686*5113495bSYour Name 	if (unlikely(!latency))
2687*5113495bSYour Name 	if (qdf_unlikely(!latency))
2688*5113495bSYour Name 
2689*5113495bSYour Name 	/* Authenticated link/legacy peer only */
2690*5113495bSYour Name 	if (IS_MLO_DP_MLD_PEER(peer) || peer->state != OL_TXRX_PEER_STATE_AUTH)
2691*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2692*5113495bSYour Name 
2693*5113495bSYour Name 	vdev = peer->vdev;
2694*5113495bSYour Name 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
2695*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2696*5113495bSYour Name 
2697*5113495bSYour Name 	txrx_peer = dp_get_txrx_peer(peer);
2698*5113495bSYour Name 	if (!txrx_peer)
2699*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2700*5113495bSYour Name 
2701*5113495bSYour Name 	link_id = dp_get_peer_link_id(peer);
2702*5113495bSYour Name 	if (link_id >= txrx_peer->stats_arr_size)
2703*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2704*5113495bSYour Name 
2705*5113495bSYour Name 	tx_latency = &txrx_peer->stats[link_id].tx_latency;
2706*5113495bSYour Name 	qdf_mem_zero(latency, sizeof(*latency));
2707*5113495bSYour Name 	qdf_mem_copy(latency->mac_remote.bytes,
2708*5113495bSYour Name 		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2709*5113495bSYour Name 	last_idx = 1 - tx_latency->cur_idx;
2710*5113495bSYour Name 	cfg = &vdev->tx_latency_cfg;
2711*5113495bSYour Name 	for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++) {
2712*5113495bSYour Name 		latency->stats[type].granularity =
2713*5113495bSYour Name 			qdf_atomic_read(&cfg->granularity[type]);
2714*5113495bSYour Name 		stats = &tx_latency->stats[last_idx][type];
2715*5113495bSYour Name 		msdus_accum = qdf_atomic_read(&stats->msdus_accum);
2716*5113495bSYour Name 		if (!msdus_accum)
2717*5113495bSYour Name 			continue;
2718*5113495bSYour Name 
2719*5113495bSYour Name 		latency_accum = qdf_atomic_read(&stats->latency_accum);
2720*5113495bSYour Name 		latency->stats[type].average = latency_accum / msdus_accum;
2721*5113495bSYour Name 		for (lvl = 0; lvl < CDP_TX_LATENCY_DISTR_LV_MAX; lvl++) {
2722*5113495bSYour Name 			latency->stats[type].distribution[lvl] =
2723*5113495bSYour Name 				qdf_atomic_read(&stats->distribution[lvl]);
2724*5113495bSYour Name 		}
2725*5113495bSYour Name 	}
2726*5113495bSYour Name 
2727*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2728*5113495bSYour Name }
2729*5113495bSYour Name 
2730*5113495bSYour Name /**
2731*5113495bSYour Name  * dp_tx_latency_stats_get_peer_iter() - iterator to get transmit latency
2732*5113495bSYour Name  * statistics for specified peer
2733*5113495bSYour Name  * @soc: dp soc handle
2734*5113495bSYour Name  * @peer: dp peer Handle
2735*5113495bSYour Name  * @arg: list to hold transmit latency statistics for peers
2736*5113495bSYour Name  *
2737*5113495bSYour Name  * Return: None
2738*5113495bSYour Name  */
2739*5113495bSYour Name static void
2740*5113495bSYour Name dp_tx_latency_stats_get_peer_iter(struct dp_soc *soc,
2741*5113495bSYour Name 				  struct dp_peer *peer,
2742*5113495bSYour Name 				  void *arg)
2743*5113495bSYour Name {
2744*5113495bSYour Name 	struct dp_vdev *vdev;
2745*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
2746*5113495bSYour Name 	struct cdp_tx_latency *latency;
2747*5113495bSYour Name 	QDF_STATUS status;
2748*5113495bSYour Name 	qdf_list_t *stats_list = (qdf_list_t *)arg;
2749*5113495bSYour Name 
2750*5113495bSYour Name 	/* Authenticated link/legacy peer only */
2751*5113495bSYour Name 	if (IS_MLO_DP_MLD_PEER(peer) || peer->state != OL_TXRX_PEER_STATE_AUTH)
2752*5113495bSYour Name 		return;
2753*5113495bSYour Name 
2754*5113495bSYour Name 	txrx_peer = dp_get_txrx_peer(peer);
2755*5113495bSYour Name 	if (!txrx_peer)
2756*5113495bSYour Name 		return;
2757*5113495bSYour Name 
2758*5113495bSYour Name 	vdev = peer->vdev;
2759*5113495bSYour Name 	latency = qdf_mem_malloc(sizeof(*latency));
2760*5113495bSYour Name 	if (!latency)
2761*5113495bSYour Name 		return;
2762*5113495bSYour Name 
2763*5113495bSYour Name 	status = dp_tx_latency_stats_get_per_peer(soc, peer, latency);
2764*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
2765*5113495bSYour Name 		goto out;
2766*5113495bSYour Name 
2767*5113495bSYour Name 	status = qdf_list_insert_back(stats_list, &latency->node);
2768*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status))
2769*5113495bSYour Name 		goto out;
2770*5113495bSYour Name 
2771*5113495bSYour Name 	return;
2772*5113495bSYour Name 
2773*5113495bSYour Name out:
2774*5113495bSYour Name 	qdf_mem_free(latency);
2775*5113495bSYour Name }
2776*5113495bSYour Name 
2777*5113495bSYour Name /**
2778*5113495bSYour Name  * dp_tx_latency_stats_rpt_per_vdev() - report transmit latency statistics for
2779*5113495bSYour Name  * specified vdev
2780*5113495bSYour Name  * @soc: dp soc handle
2781*5113495bSYour Name  * @vdev: dp vdev Handle
2782*5113495bSYour Name  *
2783*5113495bSYour Name  * Return: None
2784*5113495bSYour Name  */
2785*5113495bSYour Name static void
2786*5113495bSYour Name dp_tx_latency_stats_rpt_per_vdev(struct dp_soc *soc, struct dp_vdev *vdev)
2787*5113495bSYour Name {
2788*5113495bSYour Name 	qdf_list_t stats_list;
2789*5113495bSYour Name 	struct cdp_tx_latency *entry, *next;
2790*5113495bSYour Name 
2791*5113495bSYour Name 	if (!soc->tx_latency_cb || !dp_tx_latency_stats_report_enabled(vdev))
2792*5113495bSYour Name 		return;
2793*5113495bSYour Name 
2794*5113495bSYour Name 	qdf_list_create(&stats_list, 0);
2795*5113495bSYour Name 	dp_vdev_iterate_peer(vdev, dp_tx_latency_stats_get_peer_iter,
2796*5113495bSYour Name 			     &stats_list, DP_MOD_ID_CDP);
2797*5113495bSYour Name 	if (qdf_list_empty(&stats_list))
2798*5113495bSYour Name 		goto out;
2799*5113495bSYour Name 
2800*5113495bSYour Name 	soc->tx_latency_cb(vdev->vdev_id, &stats_list);
2801*5113495bSYour Name 
2802*5113495bSYour Name 	qdf_list_for_each_del(&stats_list, entry, next, node) {
2803*5113495bSYour Name 		qdf_list_remove_node(&stats_list, &entry->node);
2804*5113495bSYour Name 		qdf_mem_free(entry);
2805*5113495bSYour Name 	}
2806*5113495bSYour Name 
2807*5113495bSYour Name out:
2808*5113495bSYour Name 	qdf_list_destroy(&stats_list);
2809*5113495bSYour Name }
2810*5113495bSYour Name 
2811*5113495bSYour Name /**
2812*5113495bSYour Name  * dp_tx_latency_stats_report() - report transmit latency statistics for each
2813*5113495bSYour Name  * vdev of specified pdev
2814*5113495bSYour Name  * @soc: dp soc handle
2815*5113495bSYour Name  * @pdev: dp pdev Handle
2816*5113495bSYour Name  *
2817*5113495bSYour Name  * Return: None
2818*5113495bSYour Name  */
2819*5113495bSYour Name void dp_tx_latency_stats_report(struct dp_soc *soc, struct dp_pdev *pdev)
2820*5113495bSYour Name {
2821*5113495bSYour Name 	struct dp_vdev *vdev;
2822*5113495bSYour Name 
2823*5113495bSYour Name 	if (!soc->tx_latency_cb)
2824*5113495bSYour Name 		return;
2825*5113495bSYour Name 
2826*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2827*5113495bSYour Name 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
2828*5113495bSYour Name 		dp_tx_latency_stats_rpt_per_vdev(soc, vdev);
2829*5113495bSYour Name 	}
2830*5113495bSYour Name 
2831*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2832*5113495bSYour Name }
2833*5113495bSYour Name 
2834*5113495bSYour Name /**
2835*5113495bSYour Name  * dp_tx_latency_stats_clear_per_peer() - iterator to clear transmit latency
2836*5113495bSYour Name  * statistics for specified peer
2837*5113495bSYour Name  * @soc: dp soc handle
2838*5113495bSYour Name  * @peer: dp pdev Handle
2839*5113495bSYour Name  * @peer: dp peer Handle
2840*5113495bSYour Name  *
2841*5113495bSYour Name  * Return: None
2842*5113495bSYour Name  */
2843*5113495bSYour Name static void
2844*5113495bSYour Name dp_tx_latency_stats_clear_per_peer(struct dp_soc *soc, struct dp_peer *peer,
2845*5113495bSYour Name 				   void *arg)
2846*5113495bSYour Name {
2847*5113495bSYour Name 	int link_id;
2848*5113495bSYour Name 	struct dp_tx_latency *tx_latency;
2849*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
2850*5113495bSYour Name 
2851*5113495bSYour Name 	if (!txrx_peer) {
2852*5113495bSYour Name 		dp_err("no txrx peer, skip");
2853*5113495bSYour Name 		return;
2854*5113495bSYour Name 	}
2855*5113495bSYour Name 
2856*5113495bSYour Name 	for (link_id = 0; link_id < txrx_peer->stats_arr_size; link_id++) {
2857*5113495bSYour Name 		tx_latency = &txrx_peer->stats[link_id].tx_latency;
2858*5113495bSYour Name 		dp_tx_latency_stats_clear_buckets(tx_latency, 0);
2859*5113495bSYour Name 		dp_tx_latency_stats_clear_buckets(tx_latency, 1);
2860*5113495bSYour Name 	}
2861*5113495bSYour Name }
2862*5113495bSYour Name 
2863*5113495bSYour Name /**
2864*5113495bSYour Name  * dp_tx_latency_stats_clear_per_vdev() - clear transmit latency statistics
2865*5113495bSYour Name  * for specified vdev
2866*5113495bSYour Name  * @vdev: dp vdev handle
2867*5113495bSYour Name  *
2868*5113495bSYour Name  * Return: None
2869*5113495bSYour Name  */
2870*5113495bSYour Name static inline void dp_tx_latency_stats_clear_per_vdev(struct dp_vdev *vdev)
2871*5113495bSYour Name {
2872*5113495bSYour Name 	dp_vdev_iterate_peer(vdev, dp_tx_latency_stats_clear_per_peer,
2873*5113495bSYour Name 			     NULL, DP_MOD_ID_CDP);
2874*5113495bSYour Name }
2875*5113495bSYour Name 
2876*5113495bSYour Name /**
2877*5113495bSYour Name  * dp_tx_latency_stats_fetch() - fetch transmit latency statistics for
2878*5113495bSYour Name  * specified link mac address
2879*5113495bSYour Name  * @soc_hdl: Handle to struct dp_soc
2880*5113495bSYour Name  * @vdev_id: vdev id
2881*5113495bSYour Name  * @mac: link mac address of remote peer
2882*5113495bSYour Name  * @latency: buffer to hold per-link transmit latency statistics
2883*5113495bSYour Name  *
2884*5113495bSYour Name  * Return: QDF_STATUS
2885*5113495bSYour Name  */
2886*5113495bSYour Name QDF_STATUS
2887*5113495bSYour Name dp_tx_latency_stats_fetch(struct cdp_soc_t *soc_hdl,
2888*5113495bSYour Name 			  uint8_t vdev_id, uint8_t *mac,
2889*5113495bSYour Name 			  struct cdp_tx_latency *latency)
2890*5113495bSYour Name {
2891*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2892*5113495bSYour Name 	struct cdp_peer_info peer_info = {0};
2893*5113495bSYour Name 	struct dp_peer *peer;
2894*5113495bSYour Name 	QDF_STATUS status;
2895*5113495bSYour Name 
2896*5113495bSYour Name 	/* MAC addr of link peer may be the same as MLD peer,
2897*5113495bSYour Name 	 * so specify the type as CDP_LINK_PEER_TYPE here to
2898*5113495bSYour Name 	 * get link peer explicitly.
2899*5113495bSYour Name 	 */
2900*5113495bSYour Name 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac, false,
2901*5113495bSYour Name 				 CDP_LINK_PEER_TYPE);
2902*5113495bSYour Name 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
2903*5113495bSYour Name 	if (!peer) {
2904*5113495bSYour Name 		dp_err_rl("peer(vdev id %d mac " QDF_MAC_ADDR_FMT ") not found",
2905*5113495bSYour Name 			  vdev_id, QDF_MAC_ADDR_REF(mac));
2906*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2907*5113495bSYour Name 	}
2908*5113495bSYour Name 
2909*5113495bSYour Name 	status = dp_tx_latency_stats_get_per_peer(soc, peer, latency);
2910*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
2911*5113495bSYour Name 	return status;
2912*5113495bSYour Name }
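
/*
 * Illustrative sketch (not part of the build): a hypothetical
 * control-path caller fetching per-link latency for one remote link
 * address. The cdp_tx_latency buffer is caller-owned and, on success,
 * holds the statistics of the last closed measurement cycle.
 */
#if 0
static void example_fetch_latency(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				  uint8_t *link_mac)
{
	struct cdp_tx_latency latency;

	if (QDF_IS_STATUS_SUCCESS(dp_tx_latency_stats_fetch(soc_hdl, vdev_id,
							    link_mac,
							    &latency)))
		dp_info("driver avg latency %u us",
			latency.stats[CDP_TX_LATENCY_TYPE_DRIVER].average);
}
#endif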
2913*5113495bSYour Name 
2914*5113495bSYour Name /**
2915*5113495bSYour Name  * dp_tx_latency_stats_config() - config transmit latency statistics for
2916*5113495bSYour Name  * specified vdev
2917*5113495bSYour Name  * @soc_hdl: Handle to struct dp_soc
2918*5113495bSYour Name  * @vdev_id: vdev id
2919*5113495bSYour Name  * @cfg: configuration for transmit latency statistics
2920*5113495bSYour Name  *
2921*5113495bSYour Name  * Return: QDF_STATUS
2922*5113495bSYour Name  */
2923*5113495bSYour Name QDF_STATUS
2924*5113495bSYour Name dp_tx_latency_stats_config(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2925*5113495bSYour Name 			   struct cdp_tx_latency_config *cfg)
2926*5113495bSYour Name {
2927*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2928*5113495bSYour Name 	struct dp_vdev *vdev;
2929*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_E_INVAL;
2930*5113495bSYour Name 	uint32_t cca_granularity;
2931*5113495bSYour Name 	int type;
2932*5113495bSYour Name 
2933*5113495bSYour Name 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
2934*5113495bSYour Name 	if (!vdev) {
2935*5113495bSYour Name 		dp_err_rl("vdev %d does not exist", vdev_id);
2936*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
2937*5113495bSYour Name 	}
2938*5113495bSYour Name 
2939*5113495bSYour Name 	/* disable to ignore upcoming updates */
2940*5113495bSYour Name 	/* disable first so that in-flight updates are ignored */
2941*5113495bSYour Name 	dp_tx_latency_stats_clear_per_vdev(vdev);
2942*5113495bSYour Name 
2943*5113495bSYour Name 	if (!cfg->enable)
2944*5113495bSYour Name 		goto send_htt;
2945*5113495bSYour Name 
2946*5113495bSYour Name 	qdf_atomic_set(&vdev->tx_latency_cfg.report, (cfg->report ? 1 : 0));
2947*5113495bSYour Name 	for (type = 0; type < CDP_TX_LATENCY_TYPE_MAX; type++)
2948*5113495bSYour Name 		qdf_atomic_set(&vdev->tx_latency_cfg.granularity[type],
2949*5113495bSYour Name 			       cfg->granularity[type]);
2950*5113495bSYour Name 
2951*5113495bSYour Name send_htt:
2952*5113495bSYour Name 	/* in units of ms */
2953*5113495bSYour Name 	cca_granularity = cfg->granularity[CDP_TX_LATENCY_TYPE_CCA] / 1000;
2954*5113495bSYour Name 	status = dp_h2t_tx_latency_stats_cfg_msg_send(soc, vdev_id,
2955*5113495bSYour Name 						      cfg->enable, cfg->period,
2956*5113495bSYour Name 						      cca_granularity);
2957*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
2958*5113495bSYour Name 		dp_err_rl("failed to send htt msg: %d", status);
2959*5113495bSYour Name 		goto out;
2960*5113495bSYour Name 	}
2961*5113495bSYour Name 
2962*5113495bSYour Name 	qdf_atomic_set(&vdev->tx_latency_cfg.enabled, (cfg->enable ? 1 : 0));
2963*5113495bSYour Name 
2964*5113495bSYour Name out:
2965*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
2966*5113495bSYour Name 	return status;
2967*5113495bSYour Name }
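
/*
 * Illustrative sketch (not part of the build): enabling the feature for
 * one vdev. Note the ordering inside dp_tx_latency_stats_config(): the
 * stats are disabled and cleared first, granularities are programmed,
 * and 'enabled' is only set after the HTT config message succeeds, so a
 * failed firmware handshake leaves the feature off. All field values
 * below are example numbers.
 */
#if 0
static QDF_STATUS example_enable_tx_latency(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id)
{
	struct cdp_tx_latency_config cfg = {0};

	cfg.enable = true;
	cfg.report = true;
	cfg.period = 1000;	/* reporting period, example value */
	cfg.granularity[CDP_TX_LATENCY_TYPE_DRIVER] = 500;	/* us */
	cfg.granularity[CDP_TX_LATENCY_TYPE_CCA] = 2000;	/* us */

	return dp_tx_latency_stats_config(soc_hdl, vdev_id, &cfg);
}
#endif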
2968*5113495bSYour Name 
2969*5113495bSYour Name /**
2970*5113495bSYour Name  * dp_tx_latency_stats_register_cb() - register transmit latency statistics
2971*5113495bSYour Name  * callback
2972*5113495bSYour Name  * @handle: Handle to struct dp_soc
2973*5113495bSYour Name  * @cb: callback function for transmit latency statistics
2974*5113495bSYour Name  *
2975*5113495bSYour Name  * Return: QDF_STATUS
2976*5113495bSYour Name  */
2977*5113495bSYour Name QDF_STATUS
2978*5113495bSYour Name dp_tx_latency_stats_register_cb(struct cdp_soc_t *handle, cdp_tx_latency_cb cb)
2979*5113495bSYour Name {
2980*5113495bSYour Name 	struct dp_soc *soc = (struct dp_soc *)handle;
2981*5113495bSYour Name 
2982*5113495bSYour Name 	if (!soc || !cb) {
2983*5113495bSYour Name 		dp_err("soc or cb is NULL");
2984*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2985*5113495bSYour Name 	}
2986*5113495bSYour Name 
2987*5113495bSYour Name 	soc->tx_latency_cb = cb;
2988*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2989*5113495bSYour Name }
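
/*
 * Illustrative sketch (not part of the build): a hypothetical consumer
 * callback for dp_tx_latency_stats_register_cb(). The report path frees
 * every list entry as soon as the callback returns, so the callback must
 * copy out whatever it needs synchronously. The callback signature here
 * is inferred from the call site in dp_tx_latency_stats_rpt_per_vdev().
 */
#if 0
static void example_latency_cb(uint8_t vdev_id, qdf_list_t *stats_list)
{
	struct cdp_tx_latency *entry;
	qdf_list_node_t *node = NULL;

	qdf_list_peek_front(stats_list, &node);
	while (node) {
		entry = qdf_container_of(node, struct cdp_tx_latency, node);
		/* copy entry->stats[] out here; do not cache the pointer */
		qdf_list_peek_next(stats_list, node, &node);
	}
}
#endif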
2990*5113495bSYour Name 
2991*5113495bSYour Name #else
2992*5113495bSYour Name static inline void
2993*5113495bSYour Name dp_tx_get_driver_ingress_ts(struct dp_vdev *vdev,
2994*5113495bSYour Name 			    struct dp_tx_msdu_info_s *msdu_info,
2995*5113495bSYour Name 			    qdf_nbuf_t nbuf)
2996*5113495bSYour Name {
2997*5113495bSYour Name }
2998*5113495bSYour Name 
2999*5113495bSYour Name static inline void
3000*5113495bSYour Name dp_tx_update_ts_on_enqueued(struct dp_vdev *vdev,
3001*5113495bSYour Name 			    struct dp_tx_msdu_info_s *msdu_info,
3002*5113495bSYour Name 			    struct dp_tx_desc_s *tx_desc)
3003*5113495bSYour Name {
3004*5113495bSYour Name }
3005*5113495bSYour Name 
3006*5113495bSYour Name static inline void
3007*5113495bSYour Name dp_tx_latency_stats_update(struct dp_soc *soc,
3008*5113495bSYour Name 			   struct dp_txrx_peer *txrx_peer,
3009*5113495bSYour Name 			   struct dp_tx_desc_s *tx_desc,
3010*5113495bSYour Name 			   struct hal_tx_completion_status *ts,
3011*5113495bSYour Name 			   uint8_t link_id)
3012*5113495bSYour Name {
3013*5113495bSYour Name }
3014*5113495bSYour Name #endif
3015*5113495bSYour Name 
3016*5113495bSYour Name qdf_nbuf_t
3017*5113495bSYour Name dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3018*5113495bSYour Name 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
3019*5113495bSYour Name 		       struct cdp_tx_exception_metadata *tx_exc_metadata)
3020*5113495bSYour Name {
3021*5113495bSYour Name 	struct dp_pdev *pdev = vdev->pdev;
3022*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
3023*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc;
3024*5113495bSYour Name 	QDF_STATUS status;
3025*5113495bSYour Name 	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
3026*5113495bSYour Name 	uint16_t htt_tcl_metadata = 0;
3027*5113495bSYour Name 	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
3028*5113495bSYour Name 	uint8_t tid = msdu_info->tid;
3029*5113495bSYour Name 	struct cdp_tid_tx_stats *tid_stats = NULL;
3030*5113495bSYour Name 	qdf_dma_addr_t paddr;
3031*5113495bSYour Name 
3032*5113495bSYour Name 	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
3033*5113495bSYour Name 	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
3034*5113495bSYour Name 			msdu_info, tx_exc_metadata);
3035*5113495bSYour Name 	if (!tx_desc) {
3036*5113495bSYour Name 		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
3037*5113495bSYour Name 			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
3038*5113495bSYour Name 		drop_code = TX_DESC_ERR;
3039*5113495bSYour Name 		goto fail_return;
3040*5113495bSYour Name 	}
3041*5113495bSYour Name 
3042*5113495bSYour Name 	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
3043*5113495bSYour Name 
3044*5113495bSYour Name 	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
3045*5113495bSYour Name 		htt_tcl_metadata = vdev->htt_tcl_metadata;
3046*5113495bSYour Name 		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
3047*5113495bSYour Name 	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
3048*5113495bSYour Name 		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
3049*5113495bSYour Name 					    DP_TCL_METADATA_TYPE_PEER_BASED);
3050*5113495bSYour Name 		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
3051*5113495bSYour Name 					       peer_id);
3052*5113495bSYour Name 		dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata);
3053*5113495bSYour Name 	} else
3054*5113495bSYour Name 		htt_tcl_metadata = vdev->htt_tcl_metadata;
3055*5113495bSYour Name 
3056*5113495bSYour Name 	if (msdu_info->exception_fw)
3057*5113495bSYour Name 		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
3058*5113495bSYour Name 
3059*5113495bSYour Name 	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
3060*5113495bSYour Name 					 !pdev->enhanced_stats_en);
3061*5113495bSYour Name 
3062*5113495bSYour Name 	dp_tx_update_mesh_flags(soc, vdev, tx_desc);
3063*5113495bSYour Name 
3064*5113495bSYour Name 	if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
3065*5113495bSYour Name 		paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
3066*5113495bSYour Name 	else
3067*5113495bSYour Name 		paddr =  dp_tx_nbuf_map(vdev, tx_desc, nbuf);
3068*5113495bSYour Name 		paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf);
3069*5113495bSYour Name 	if (!paddr) {
3070*5113495bSYour Name 		/* Handle failure */
3071*5113495bSYour Name 		dp_err("qdf_nbuf_map failed");
3072*5113495bSYour Name 		DP_STATS_INC(vdev,
3073*5113495bSYour Name 			     tx_i[msdu_info->xmit_type].dropped.dma_error, 1);
3074*5113495bSYour Name 		drop_code = TX_DMA_MAP_ERR;
3075*5113495bSYour Name 		goto release_desc;
3076*5113495bSYour Name 	}
3077*5113495bSYour Name 
3078*5113495bSYour Name 	tx_desc->dma_addr = paddr;
3079*5113495bSYour Name 	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
3080*5113495bSYour Name 			       tx_desc->id, DP_TX_DESC_MAP);
3081*5113495bSYour Name 	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
3082*5113495bSYour Name 	/* Enqueue the Tx MSDU descriptor to HW for transmit */
3083*5113495bSYour Name 	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
3084*5113495bSYour Name 					     htt_tcl_metadata,
3085*5113495bSYour Name 					     tx_exc_metadata, msdu_info);
3086*5113495bSYour Name 
3087*5113495bSYour Name 	if (status != QDF_STATUS_SUCCESS) {
3088*5113495bSYour Name 		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
3089*5113495bSYour Name 			     tx_desc, tx_q->ring_id);
3090*5113495bSYour Name 		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
3091*5113495bSYour Name 				       tx_desc->id, DP_TX_DESC_UNMAP);
3092*5113495bSYour Name 		dp_tx_nbuf_unmap(soc, tx_desc);
3093*5113495bSYour Name 		drop_code = TX_HW_ENQUEUE;
3094*5113495bSYour Name 		goto release_desc;
3095*5113495bSYour Name 	}
3096*5113495bSYour Name 
3097*5113495bSYour Name 	dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc);
3098*5113495bSYour Name 
3099*5113495bSYour Name 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
3100*5113495bSYour Name 	return NULL;
3101*5113495bSYour Name 
3102*5113495bSYour Name release_desc:
3103*5113495bSYour Name 	dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id);
3104*5113495bSYour Name 
3105*5113495bSYour Name fail_return:
3106*5113495bSYour Name 	dp_tx_get_tid(vdev, nbuf, msdu_info);
3107*5113495bSYour Name 	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
3108*5113495bSYour Name 	tid_stats = &pdev->stats.tid_stats.
3109*5113495bSYour Name 		    tid_tx_stats[tx_q->ring_id][tid];
3110*5113495bSYour Name 	tid_stats->swdrop_cnt[drop_code]++;
3111*5113495bSYour Name 	return nbuf;
3112*5113495bSYour Name }
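
/*
 * Informative summary of the single-MSDU fast path above: prepare the
 * SW descriptor -> derive TCL metadata (peer-based when a valid peer_id
 * is passed, vdev-based otherwise) -> DMA map (rmnet/fast/regular) ->
 * hardware enqueue via soc->arch_ops.tx_hw_enqueue(). A NULL return
 * means the nbuf was consumed by the DP layer; a non-NULL return hands
 * the nbuf back to the caller after the drop has been accounted.
 */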
3113*5113495bSYour Name 
3114*5113495bSYour Name /**
3115*5113495bSYour Name  * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set
3116*5113495bSYour Name  * @soc: Soc handle
3117*5113495bSYour Name  * @desc: software Tx descriptor to be processed
3118*5113495bSYour Name  *
3119*5113495bSYour Name  * Return: 0 if Success
3120*5113495bSYour Name  * Return: 0 if the frame was consumed here (TDLS mgmt frame), 1 otherwise
3121*5113495bSYour Name #ifdef FEATURE_WLAN_TDLS
3122*5113495bSYour Name static inline int
3123*5113495bSYour Name dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
3124*5113495bSYour Name {
3125*5113495bSYour Name 	/* If it is TDLS mgmt, don't unmap or free the frame */
3126*5113495bSYour Name 	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
3127*5113495bSYour Name 		dp_non_std_htt_tx_comp_free_buff(soc, desc);
3128*5113495bSYour Name 		return 0;
3129*5113495bSYour Name 	}
3130*5113495bSYour Name 	return 1;
3131*5113495bSYour Name }
3132*5113495bSYour Name #else
3133*5113495bSYour Name static inline int
3134*5113495bSYour Name dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
3135*5113495bSYour Name {
3136*5113495bSYour Name 	return 1;
3137*5113495bSYour Name }
3138*5113495bSYour Name #endif
3139*5113495bSYour Name 
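/*
 * Note on the free path below: TDLS mgmt frames are handed off to the
 * non-std completion handler, TSO frames unmap every segment via the
 * MSDU extension descriptor, SG frames unmap each fragment, and cloned
 * ME nbufs skip the unmap since only the original nbuf was mapped.
 * When delayed_free is set, the nbuf is returned to the caller instead
 * of being freed here.
 */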
3140*5113495bSYour Name qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
3141*5113495bSYour Name 			       bool delayed_free)
3142*5113495bSYour Name {
3143*5113495bSYour Name 	qdf_nbuf_t nbuf = desc->nbuf;
3144*5113495bSYour Name 	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
3145*5113495bSYour Name 
3146*5113495bSYour Name 	/* nbuf already freed in vdev detach path */
3147*5113495bSYour Name 	if (!nbuf)
3148*5113495bSYour Name 		return NULL;
3149*5113495bSYour Name 
3150*5113495bSYour Name 	if (!dp_tdls_tx_comp_free_buff(soc, desc))
3151*5113495bSYour Name 		return NULL;
3152*5113495bSYour Name 
3153*5113495bSYour Name 	/* 0 : MSDU buffer, 1 : MLE */
3154*5113495bSYour Name 	if (desc->msdu_ext_desc) {
3155*5113495bSYour Name 		/* TSO free */
3156*5113495bSYour Name 		if (hal_tx_ext_desc_get_tso_enable(
3157*5113495bSYour Name 					desc->msdu_ext_desc->vaddr)) {
3158*5113495bSYour Name 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
3159*5113495bSYour Name 					       desc->id, DP_TX_COMP_MSDU_EXT);
3160*5113495bSYour Name 			dp_tx_tso_seg_history_add(soc,
3161*5113495bSYour Name 						  desc->msdu_ext_desc->tso_desc,
3162*5113495bSYour Name 						  desc->nbuf, desc->id, type);
3163*5113495bSYour Name 			/* unmap each TSO seg before freeing the nbuf */
3164*5113495bSYour Name 			dp_tx_tso_unmap_segment(soc,
3165*5113495bSYour Name 						desc->msdu_ext_desc->tso_desc,
3166*5113495bSYour Name 						desc->msdu_ext_desc->
3167*5113495bSYour Name 						tso_num_desc);
3168*5113495bSYour Name 			goto nbuf_free;
3169*5113495bSYour Name 		}
3170*5113495bSYour Name 
3171*5113495bSYour Name 		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
3172*5113495bSYour Name 			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
3173*5113495bSYour Name 			qdf_dma_addr_t iova;
3174*5113495bSYour Name 			uint32_t frag_len;
3175*5113495bSYour Name 			uint32_t i;
3176*5113495bSYour Name 
3177*5113495bSYour Name 			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
3178*5113495bSYour Name 						     QDF_DMA_TO_DEVICE,
3179*5113495bSYour Name 						     qdf_nbuf_headlen(nbuf));
3180*5113495bSYour Name 
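			/* frag 0 is the linear header unmapped just above;
			 * walk the remaining ext-desc fragments until an
			 * empty entry (no iova or len) marks the end.
			 */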
3181*5113495bSYour Name 			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
3182*5113495bSYour Name 				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
3183*5113495bSYour Name 							      &iova,
3184*5113495bSYour Name 							      &frag_len);
3185*5113495bSYour Name 				if (!iova || !frag_len)
3186*5113495bSYour Name 					break;
3187*5113495bSYour Name 
3188*5113495bSYour Name 				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
3189*5113495bSYour Name 						   QDF_DMA_TO_DEVICE);
3190*5113495bSYour Name 			}
3191*5113495bSYour Name 
3192*5113495bSYour Name 			goto nbuf_free;
3193*5113495bSYour Name 		}
3194*5113495bSYour Name 	}
3195*5113495bSYour Name 	/* If it's an ME frame, don't unmap the cloned nbufs */
3196*5113495bSYour Name 	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
3197*5113495bSYour Name 		goto nbuf_free;
3198*5113495bSYour Name 
3199*5113495bSYour Name 	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
3200*5113495bSYour Name 	dp_tx_unmap(soc, desc);
3201*5113495bSYour Name 
3202*5113495bSYour Name 	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
3203*5113495bSYour Name 		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
3204*5113495bSYour Name 
3205*5113495bSYour Name 	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
3206*5113495bSYour Name 		return NULL;
3207*5113495bSYour Name 
3208*5113495bSYour Name nbuf_free:
3209*5113495bSYour Name 	if (delayed_free)
3210*5113495bSYour Name 		return nbuf;
3211*5113495bSYour Name 
3212*5113495bSYour Name 	qdf_nbuf_free(nbuf);
3213*5113495bSYour Name 
3214*5113495bSYour Name 	return NULL;
3215*5113495bSYour Name }
3216*5113495bSYour Name 
3217*5113495bSYour Name /**
3218*5113495bSYour Name  * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
3219*5113495bSYour Name  * @soc: DP soc handle
3220*5113495bSYour Name  * @nbuf: skb
3221*5113495bSYour Name  * @msdu_info: MSDU info
3222*5113495bSYour Name  *
3223*5113495bSYour Name  * Return: None
3224*5113495bSYour Name  */
3225*5113495bSYour Name static inline void
3226*5113495bSYour Name dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
3227*5113495bSYour Name 		   struct dp_tx_msdu_info_s *msdu_info)
3228*5113495bSYour Name {
3229*5113495bSYour Name 	uint32_t cur_idx;
3230*5113495bSYour Name 	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
3231*5113495bSYour Name 
3232*5113495bSYour Name 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
3233*5113495bSYour Name 				     qdf_nbuf_headlen(nbuf));
3234*5113495bSYour Name 
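	/* Rebuild each fragment's 64-bit DMA address from the stored
	 * paddr_lo/paddr_hi halves before unmapping it.
	 */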
3235*5113495bSYour Name 	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
3236*5113495bSYour Name 		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
3237*5113495bSYour Name 				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
3238*5113495bSYour Name 				    seg->frags[cur_idx].paddr_hi) << 32),
3239*5113495bSYour Name 				   seg->frags[cur_idx].len,
3240*5113495bSYour Name 				   QDF_DMA_TO_DEVICE);
3241*5113495bSYour Name }
3242*5113495bSYour Name 
3243*5113495bSYour Name #if QDF_LOCK_STATS
3244*5113495bSYour Name noinline
3245*5113495bSYour Name #else
3246*5113495bSYour Name #endif
3247*5113495bSYour Name qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3248*5113495bSYour Name 				    struct dp_tx_msdu_info_s *msdu_info)
3249*5113495bSYour Name {
3250*5113495bSYour Name 	uint32_t i;
3251*5113495bSYour Name 	struct dp_pdev *pdev = vdev->pdev;
3252*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
3253*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc;
3254*5113495bSYour Name 	bool is_cce_classified = false;
3255*5113495bSYour Name 	QDF_STATUS status;
3256*5113495bSYour Name 	uint16_t htt_tcl_metadata = 0;
3257*5113495bSYour Name 	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
3258*5113495bSYour Name 	struct cdp_tid_tx_stats *tid_stats = NULL;
3259*5113495bSYour Name 	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
3260*5113495bSYour Name 
3261*5113495bSYour Name 	if (msdu_info->frm_type == dp_tx_frm_me)
3262*5113495bSYour Name 		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
3263*5113495bSYour Name 
3264*5113495bSYour Name 	i = 0;
3265*5113495bSYour Name 	/* Print statement to track i and num_seg */
3266*5113495bSYour Name 	/*
3267*5113495bSYour Name 	 * For each segment (maps to 1 MSDU), prepare software and hardware
3268*5113495bSYour Name 	 * descriptors using information in msdu_info
3269*5113495bSYour Name 	 */
3270*5113495bSYour Name 	while (i < msdu_info->num_seg) {
3271*5113495bSYour Name 		/*
3272*5113495bSYour Name 		 * Setup Tx descriptor for an MSDU, and MSDU extension
3273*5113495bSYour Name 		 * descriptor
3274*5113495bSYour Name 		 */
3275*5113495bSYour Name 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
3276*5113495bSYour Name 				tx_q->desc_pool_id);
3277*5113495bSYour Name 
3278*5113495bSYour Name 		if (!tx_desc) {
3279*5113495bSYour Name 			if (msdu_info->frm_type == dp_tx_frm_me) {
3280*5113495bSYour Name 				prep_desc_fail++;
3281*5113495bSYour Name 				dp_tx_me_free_buf(pdev,
3282*5113495bSYour Name 					(void *)(msdu_info->u.sg_info
3283*5113495bSYour Name 						.curr_seg->frags[0].vaddr));
3284*5113495bSYour Name 				if (prep_desc_fail == msdu_info->num_seg) {
3285*5113495bSYour Name 					/*
3286*5113495bSYour Name 					 * Unmap is needed only if descriptor
3287*5113495bSYour Name 					 * preparation failed for all segments.
3288*5113495bSYour Name 					 */
3289*5113495bSYour Name 					qdf_nbuf_unmap(soc->osdev,
3290*5113495bSYour Name 						       msdu_info->u.sg_info.
3291*5113495bSYour Name 						       curr_seg->nbuf,
3292*5113495bSYour Name 						       QDF_DMA_TO_DEVICE);
3293*5113495bSYour Name 				}
3294*5113495bSYour Name 				/*
3295*5113495bSYour Name 				 * Free the nbuf for the current segment
3296*5113495bSYour Name 				 * and make it point to the next in the list.
3297*5113495bSYour Name 				 * For ME, there are as many segments as
3298*5113495bSYour Name 				 * there are clients.
3299*5113495bSYour Name 				 */
3300*5113495bSYour Name 				qdf_nbuf_free(msdu_info->u.sg_info
3301*5113495bSYour Name 					      .curr_seg->nbuf);
3302*5113495bSYour Name 				if (msdu_info->u.sg_info.curr_seg->next) {
3303*5113495bSYour Name 					msdu_info->u.sg_info.curr_seg =
3304*5113495bSYour Name 						msdu_info->u.sg_info
3305*5113495bSYour Name 						.curr_seg->next;
3306*5113495bSYour Name 					nbuf = msdu_info->u.sg_info
3307*5113495bSYour Name 					       .curr_seg->nbuf;
3308*5113495bSYour Name 				}
3309*5113495bSYour Name 				i++;
3310*5113495bSYour Name 				continue;
3311*5113495bSYour Name 			}
3312*5113495bSYour Name 
3313*5113495bSYour Name 			if (msdu_info->frm_type == dp_tx_frm_tso) {
3314*5113495bSYour Name 				dp_tx_tso_seg_history_add(
3315*5113495bSYour Name 						soc,
3316*5113495bSYour Name 						msdu_info->u.tso_info.curr_seg,
3317*5113495bSYour Name 						nbuf, 0, DP_TX_DESC_UNMAP);
3318*5113495bSYour Name 				dp_tx_tso_unmap_segment(soc,
3319*5113495bSYour Name 							msdu_info->u.tso_info.
3320*5113495bSYour Name 							curr_seg,
3321*5113495bSYour Name 							msdu_info->u.tso_info.
3322*5113495bSYour Name 							tso_num_seg_list);
3323*5113495bSYour Name 
3324*5113495bSYour Name 				if (msdu_info->u.tso_info.curr_seg->next) {
3325*5113495bSYour Name 					msdu_info->u.tso_info.curr_seg =
3326*5113495bSYour Name 					msdu_info->u.tso_info.curr_seg->next;
3327*5113495bSYour Name 					i++;
3328*5113495bSYour Name 					continue;
3329*5113495bSYour Name 				}
3330*5113495bSYour Name 			}
3331*5113495bSYour Name 
3332*5113495bSYour Name 			if (msdu_info->frm_type == dp_tx_frm_sg)
3333*5113495bSYour Name 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
3334*5113495bSYour Name 
3335*5113495bSYour Name 			goto done;
3336*5113495bSYour Name 		}
3337*5113495bSYour Name 
3338*5113495bSYour Name 		if (msdu_info->frm_type == dp_tx_frm_me) {
3339*5113495bSYour Name 			tx_desc->msdu_ext_desc->me_buffer =
3340*5113495bSYour Name 				(struct dp_tx_me_buf_t *)msdu_info->
3341*5113495bSYour Name 				u.sg_info.curr_seg->frags[0].vaddr;
3342*5113495bSYour Name 			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
3343*5113495bSYour Name 		}
3344*5113495bSYour Name 
3345*5113495bSYour Name 		if (is_cce_classified)
3346*5113495bSYour Name 			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
3347*5113495bSYour Name 
3348*5113495bSYour Name 		htt_tcl_metadata = vdev->htt_tcl_metadata;
3349*5113495bSYour Name 		if (msdu_info->exception_fw) {
3350*5113495bSYour Name 			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
3351*5113495bSYour Name 		}
3352*5113495bSYour Name 
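		/* Decide whether a ring head-pointer update is required for
		 * this segment; the flush itself is handled by
		 * dp_tx_check_and_flush_hp() after the enqueue below.
		 */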
3353*5113495bSYour Name 		dp_tx_is_hp_update_required(i, msdu_info);
3354*5113495bSYour Name 
3355*5113495bSYour Name 		/*
3356*5113495bSYour Name 		 * For frames with multiple segments (TSO, ME), jump to next
3357*5113495bSYour Name 		 * segment.
3358*5113495bSYour Name 		 */
3359*5113495bSYour Name 		if (msdu_info->frm_type == dp_tx_frm_tso) {
3360*5113495bSYour Name 			if (msdu_info->u.tso_info.curr_seg->next) {
3361*5113495bSYour Name 				msdu_info->u.tso_info.curr_seg =
3362*5113495bSYour Name 					msdu_info->u.tso_info.curr_seg->next;
3363*5113495bSYour Name 
3364*5113495bSYour Name 				/*
3365*5113495bSYour Name 				 * If this is a jumbo nbuf, then increment the
3366*5113495bSYour Name 				 * number of nbuf users for each additional
3367*5113495bSYour Name 				 * segment of the msdu. This will ensure that
3368*5113495bSYour Name 				 * the skb is freed only after receiving tx
3369*5113495bSYour Name 				 * completion for all segments of an nbuf
3370*5113495bSYour Name 				 */
3371*5113495bSYour Name 				qdf_nbuf_inc_users(nbuf);
3372*5113495bSYour Name 
3373*5113495bSYour Name 				/* Check with MCL if this is needed */
3374*5113495bSYour Name 				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
3375*5113495bSYour Name 				 */
3376*5113495bSYour Name 			}
3377*5113495bSYour Name 		}
3378*5113495bSYour Name 
3379*5113495bSYour Name 		dp_tx_update_mcast_param(DP_INVALID_PEER,
3380*5113495bSYour Name 					 &htt_tcl_metadata,
3381*5113495bSYour Name 					 vdev,
3382*5113495bSYour Name 					 msdu_info);
3383*5113495bSYour Name 		/*
3384*5113495bSYour Name 		 * Enqueue the Tx MSDU descriptor to HW for transmit
3385*5113495bSYour Name 		 */
3386*5113495bSYour Name 		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
3387*5113495bSYour Name 						     htt_tcl_metadata,
3388*5113495bSYour Name 						     NULL, msdu_info);
3389*5113495bSYour Name 
3390*5113495bSYour Name 		dp_tx_check_and_flush_hp(soc, status, msdu_info);
3391*5113495bSYour Name 
3392*5113495bSYour Name 		if (status != QDF_STATUS_SUCCESS) {
3393*5113495bSYour Name 			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
3394*5113495bSYour Name 				   tx_desc, tx_q->ring_id);
3395*5113495bSYour Name 
3396*5113495bSYour Name 			dp_tx_get_tid(vdev, nbuf, msdu_info);
3397*5113495bSYour Name 			tid_stats = &pdev->stats.tid_stats.
3398*5113495bSYour Name 				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
3399*5113495bSYour Name 			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
3400*5113495bSYour Name 
3401*5113495bSYour Name 			if (msdu_info->frm_type == dp_tx_frm_me) {
3402*5113495bSYour Name 				hw_enq_fail++;
3403*5113495bSYour Name 				if (hw_enq_fail == msdu_info->num_seg) {
3404*5113495bSYour Name 					/*
3405*5113495bSYour Name 					 * Unmap is needed only if enqueue
3406*5113495bSYour Name 					 * failed for all segments.
3407*5113495bSYour Name 					 */
3408*5113495bSYour Name 					qdf_nbuf_unmap(soc->osdev,
3409*5113495bSYour Name 						       msdu_info->u.sg_info.
3410*5113495bSYour Name 						       curr_seg->nbuf,
3411*5113495bSYour Name 						       QDF_DMA_TO_DEVICE);
3412*5113495bSYour Name 				}
3413*5113495bSYour Name 				/*
3414*5113495bSYour Name 				 * Free the nbuf for the current segment
3415*5113495bSYour Name 				 * and make it point to the next in the list.
3416*5113495bSYour Name 				 * For ME, there are as many segments as
3417*5113495bSYour Name 				 * there are clients.
3418*5113495bSYour Name 				 */
3419*5113495bSYour Name 				qdf_nbuf_free(msdu_info->u.sg_info
3420*5113495bSYour Name 					      .curr_seg->nbuf);
3421*5113495bSYour Name 				dp_tx_desc_release(soc, tx_desc,
3422*5113495bSYour Name 						   tx_q->desc_pool_id);
3423*5113495bSYour Name 				if (msdu_info->u.sg_info.curr_seg->next) {
3424*5113495bSYour Name 					msdu_info->u.sg_info.curr_seg =
3425*5113495bSYour Name 						msdu_info->u.sg_info
3426*5113495bSYour Name 						.curr_seg->next;
3427*5113495bSYour Name 					nbuf = msdu_info->u.sg_info
3428*5113495bSYour Name 					       .curr_seg->nbuf;
3429*5113495bSYour Name 				} else
3430*5113495bSYour Name 					break;
3431*5113495bSYour Name 				i++;
3432*5113495bSYour Name 				continue;
3433*5113495bSYour Name 			}
3434*5113495bSYour Name 
3435*5113495bSYour Name 			/*
3436*5113495bSYour Name 			 * For TSO frames, the nbuf users increment done for
3437*5113495bSYour Name 			 * the current segment has to be reverted, since the
3438*5113495bSYour Name 			 * hw enqueue for this segment failed
3439*5113495bSYour Name 			 */
3440*5113495bSYour Name 			if (msdu_info->frm_type == dp_tx_frm_tso &&
3441*5113495bSYour Name 			    msdu_info->u.tso_info.curr_seg) {
3442*5113495bSYour Name 				/*
3443*5113495bSYour Name 				 * unmap and free current,
3444*5113495bSYour Name 				 * retransmit remaining segments
3445*5113495bSYour Name 				 */
3446*5113495bSYour Name 				dp_tx_comp_free_buf(soc, tx_desc, false);
3447*5113495bSYour Name 				i++;
3448*5113495bSYour Name 				dp_tx_desc_release(soc, tx_desc,
3449*5113495bSYour Name 						   tx_q->desc_pool_id);
3450*5113495bSYour Name 				continue;
3451*5113495bSYour Name 			}
3452*5113495bSYour Name 
3453*5113495bSYour Name 			if (msdu_info->frm_type == dp_tx_frm_sg)
3454*5113495bSYour Name 				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
3455*5113495bSYour Name 
3456*5113495bSYour Name 			dp_tx_desc_release(soc, tx_desc, tx_q->desc_pool_id);
3457*5113495bSYour Name 			goto done;
3458*5113495bSYour Name 		}
3459*5113495bSYour Name 
3460*5113495bSYour Name 		dp_tx_update_ts_on_enqueued(vdev, msdu_info, tx_desc);
3461*5113495bSYour Name 
3462*5113495bSYour Name 		/*
3463*5113495bSYour Name 		 * TODO
3464*5113495bSYour Name 		 * if tso_info structure can be modified to have curr_seg
3465*5113495bSYour Name 		 * as first element, following 2 blocks of code (for TSO and SG)
3466*5113495bSYour Name 		 * can be combined into 1
3467*5113495bSYour Name 		 */
3468*5113495bSYour Name 
3469*5113495bSYour Name 		/*
3470*5113495bSYour Name 		 * For Multicast-Unicast converted packets,
3471*5113495bSYour Name 		 * each converted frame (for a client) is represented as
3472*5113495bSYour Name 		 * 1 segment
3473*5113495bSYour Name 		 */
3474*5113495bSYour Name 		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
3475*5113495bSYour Name 				(msdu_info->frm_type == dp_tx_frm_me)) {
3476*5113495bSYour Name 			if (msdu_info->u.sg_info.curr_seg->next) {
3477*5113495bSYour Name 				msdu_info->u.sg_info.curr_seg =
3478*5113495bSYour Name 					msdu_info->u.sg_info.curr_seg->next;
3479*5113495bSYour Name 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
3480*5113495bSYour Name 			} else
3481*5113495bSYour Name 				break;
3482*5113495bSYour Name 		}
3483*5113495bSYour Name 		i++;
3484*5113495bSYour Name 	}
3485*5113495bSYour Name 
3486*5113495bSYour Name 	nbuf = NULL;
3487*5113495bSYour Name 
3488*5113495bSYour Name done:
3489*5113495bSYour Name 	return nbuf;
3490*5113495bSYour Name }
3491*5113495bSYour Name 
3492*5113495bSYour Name /**
3493*5113495bSYour Name  * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
3494*5113495bSYour Name  *                     for SG frames
3495*5113495bSYour Name  * @vdev: DP vdev handle
3496*5113495bSYour Name  * @nbuf: skb
3497*5113495bSYour Name  * @seg_info: Pointer to Segment info Descriptor to be prepared
3498*5113495bSYour Name  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
3499*5113495bSYour Name  *
3500*5113495bSYour Name  * Return: nbuf on success,
3501*5113495bSYour Name  *         NULL when it fails (nbuf is freed on failure)
3502*5113495bSYour Name  */
3503*5113495bSYour Name static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3504*5113495bSYour Name 	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
3505*5113495bSYour Name {
3506*5113495bSYour Name 	uint32_t cur_frag, nr_frags, i;
3507*5113495bSYour Name 	qdf_dma_addr_t paddr;
3508*5113495bSYour Name 	struct dp_tx_sg_info_s *sg_info;
3509*5113495bSYour Name 	uint8_t xmit_type = msdu_info->xmit_type;
3510*5113495bSYour Name 
3511*5113495bSYour Name 	sg_info = &msdu_info->u.sg_info;
3512*5113495bSYour Name 	nr_frags = qdf_nbuf_get_nr_frags(nbuf);
3513*5113495bSYour Name 
3514*5113495bSYour Name 	if (QDF_STATUS_SUCCESS !=
3515*5113495bSYour Name 		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
3516*5113495bSYour Name 					   QDF_DMA_TO_DEVICE,
3517*5113495bSYour Name 					   qdf_nbuf_headlen(nbuf))) {
3518*5113495bSYour Name 		dp_tx_err("dma map error");
3519*5113495bSYour Name 		DP_STATS_INC(vdev, tx_i[xmit_type].sg.dma_map_error,
3520*5113495bSYour Name 			     1);
3521*5113495bSYour Name 		qdf_nbuf_free(nbuf);
3522*5113495bSYour Name 		return NULL;
3523*5113495bSYour Name 	}
3524*5113495bSYour Name 
3525*5113495bSYour Name 	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
3526*5113495bSYour Name 	seg_info->frags[0].paddr_lo = paddr;
3527*5113495bSYour Name 	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
3528*5113495bSYour Name 	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
3529*5113495bSYour Name 	seg_info->frags[0].vaddr = (void *) nbuf;
3530*5113495bSYour Name 
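	/* frags[0] above holds the linear (header) portion of the nbuf;
	 * the page fragments are mapped into frags[1..nr_frags] below.
	 */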
3531*5113495bSYour Name 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
3532*5113495bSYour Name 		if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
3533*5113495bSYour Name 							    nbuf, 0,
3534*5113495bSYour Name 							    QDF_DMA_TO_DEVICE,
3535*5113495bSYour Name 							    cur_frag)) {
3536*5113495bSYour Name 			dp_tx_err("frag dma map error");
3537*5113495bSYour Name 			DP_STATS_INC(vdev,
3538*5113495bSYour Name 				     tx_i[xmit_type].sg.dma_map_error,
3539*5113495bSYour Name 				     1);
3540*5113495bSYour Name 			goto map_err;
3541*5113495bSYour Name 		}
3542*5113495bSYour Name 
3543*5113495bSYour Name 		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
3544*5113495bSYour Name 		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
3545*5113495bSYour Name 		seg_info->frags[cur_frag + 1].paddr_hi =
3546*5113495bSYour Name 			((uint64_t) paddr) >> 32;
3547*5113495bSYour Name 		seg_info->frags[cur_frag + 1].len =
3548*5113495bSYour Name 			qdf_nbuf_get_frag_size(nbuf, cur_frag);
3549*5113495bSYour Name 	}
3550*5113495bSYour Name 
3551*5113495bSYour Name 	seg_info->frag_cnt = (cur_frag + 1);
3552*5113495bSYour Name 	seg_info->total_len = qdf_nbuf_len(nbuf);
3553*5113495bSYour Name 	seg_info->next = NULL;
3554*5113495bSYour Name 
3555*5113495bSYour Name 	sg_info->curr_seg = seg_info;
3556*5113495bSYour Name 
3557*5113495bSYour Name 	msdu_info->frm_type = dp_tx_frm_sg;
3558*5113495bSYour Name 	msdu_info->num_seg = 1;
3559*5113495bSYour Name 
3560*5113495bSYour Name 	return nbuf;
3561*5113495bSYour Name map_err:
3562*5113495bSYour Name 	/* restore paddr into nbuf before calling unmap */
3563*5113495bSYour Name 	qdf_nbuf_mapped_paddr_set(nbuf,
3564*5113495bSYour Name 				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
3565*5113495bSYour Name 				  ((uint64_t)
3566*5113495bSYour Name 				  seg_info->frags[0].paddr_hi) << 32));
3567*5113495bSYour Name 	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
3568*5113495bSYour Name 				     QDF_DMA_TO_DEVICE,
3569*5113495bSYour Name 				     seg_info->frags[0].len);
3570*5113495bSYour Name 	for (i = 1; i <= cur_frag; i++) {
3571*5113495bSYour Name 		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
3572*5113495bSYour Name 				   (seg_info->frags[i].paddr_lo | ((uint64_t)
3573*5113495bSYour Name 				   seg_info->frags[i].paddr_hi) << 32),
3574*5113495bSYour Name 				   seg_info->frags[i].len,
3575*5113495bSYour Name 				   QDF_DMA_TO_DEVICE);
3576*5113495bSYour Name 	}
3577*5113495bSYour Name 	qdf_nbuf_free(nbuf);
3578*5113495bSYour Name 	return NULL;
3579*5113495bSYour Name }
3580*5113495bSYour Name 
3581*5113495bSYour Name /**
3582*5113495bSYour Name  * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
3583*5113495bSYour Name  * @vdev: DP vdev handle
3584*5113495bSYour Name  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
3585*5113495bSYour Name  * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
3586*5113495bSYour Name  *
3587*5113495bSYour Name  * Return: None
3589*5113495bSYour Name  */
3590*5113495bSYour Name static
3591*5113495bSYour Name void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
3592*5113495bSYour Name 				    struct dp_tx_msdu_info_s *msdu_info,
3593*5113495bSYour Name 				    uint16_t ppdu_cookie)
3594*5113495bSYour Name {
3595*5113495bSYour Name 	struct htt_tx_msdu_desc_ext2_t *meta_data =
3596*5113495bSYour Name 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
3597*5113495bSYour Name 
3598*5113495bSYour Name 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
3599*5113495bSYour Name 
3600*5113495bSYour Name 	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
3601*5113495bSYour Name 				(msdu_info->meta_data[5], 1);
3602*5113495bSYour Name 	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
3603*5113495bSYour Name 				(msdu_info->meta_data[5], 1);
3604*5113495bSYour Name 	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
3605*5113495bSYour Name 				(msdu_info->meta_data[6], ppdu_cookie);
3606*5113495bSYour Name 
3607*5113495bSYour Name 	msdu_info->exception_fw = 1;
3608*5113495bSYour Name 	msdu_info->is_tx_sniffer = 1;
3609*5113495bSYour Name }
3610*5113495bSYour Name 
3611*5113495bSYour Name #ifdef MESH_MODE_SUPPORT
3612*5113495bSYour Name 
3613*5113495bSYour Name /**
3614*5113495bSYour Name  * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
3615*5113495bSYour Name  *				and prepare msdu_info for mesh frames.
3616*5113495bSYour Name  * @vdev: DP vdev handle
3617*5113495bSYour Name  * @nbuf: skb
3618*5113495bSYour Name  * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
3619*5113495bSYour Name  *
3620*5113495bSYour Name  * Return: NULL on failure,
3621*5113495bSYour Name  *         nbuf when extracted successfully
3622*5113495bSYour Name  */
3623*5113495bSYour Name static
3624*5113495bSYour Name qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3625*5113495bSYour Name 				struct dp_tx_msdu_info_s *msdu_info)
3626*5113495bSYour Name {
3627*5113495bSYour Name 	struct meta_hdr_s *mhdr;
3628*5113495bSYour Name 	struct htt_tx_msdu_desc_ext2_t *meta_data =
3629*5113495bSYour Name 				(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
3630*5113495bSYour Name 
3631*5113495bSYour Name 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
3632*5113495bSYour Name 
3633*5113495bSYour Name 	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
3634*5113495bSYour Name 		msdu_info->exception_fw = 0;
3635*5113495bSYour Name 		goto remove_meta_hdr;
3636*5113495bSYour Name 	}
3637*5113495bSYour Name 
3638*5113495bSYour Name 	msdu_info->exception_fw = 1;
3639*5113495bSYour Name 
3640*5113495bSYour Name 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
3641*5113495bSYour Name 
3642*5113495bSYour Name 	meta_data->host_tx_desc_pool = 1;
3643*5113495bSYour Name 	meta_data->update_peer_cache = 1;
3644*5113495bSYour Name 	meta_data->learning_frame = 1;
3645*5113495bSYour Name 
3646*5113495bSYour Name 	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
3647*5113495bSYour Name 		meta_data->power = mhdr->power;
3648*5113495bSYour Name 
3649*5113495bSYour Name 		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
3650*5113495bSYour Name 		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
3651*5113495bSYour Name 		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
3652*5113495bSYour Name 		meta_data->retry_limit = mhdr->rate_info[0].max_tries;
3653*5113495bSYour Name 
3654*5113495bSYour Name 		meta_data->dyn_bw = 1;
3655*5113495bSYour Name 
3656*5113495bSYour Name 		meta_data->valid_pwr = 1;
3657*5113495bSYour Name 		meta_data->valid_mcs_mask = 1;
3658*5113495bSYour Name 		meta_data->valid_nss_mask = 1;
3659*5113495bSYour Name 		meta_data->valid_preamble_type  = 1;
3660*5113495bSYour Name 		meta_data->valid_retries = 1;
3661*5113495bSYour Name 		meta_data->valid_bw_info = 1;
3662*5113495bSYour Name 	}
3663*5113495bSYour Name 
3664*5113495bSYour Name 	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
3665*5113495bSYour Name 		meta_data->encrypt_type = 0;
3666*5113495bSYour Name 		meta_data->valid_encrypt_type = 1;
3667*5113495bSYour Name 		meta_data->learning_frame = 0;
3668*5113495bSYour Name 	}
3669*5113495bSYour Name 
3670*5113495bSYour Name 	meta_data->valid_key_flags = 1;
3671*5113495bSYour Name 	meta_data->key_flags = (mhdr->keyix & 0x3);
3672*5113495bSYour Name 
3673*5113495bSYour Name remove_meta_hdr:
3674*5113495bSYour Name 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
3675*5113495bSYour Name 		dp_tx_err("qdf_nbuf_pull_head failed");
3676*5113495bSYour Name 		qdf_nbuf_free(nbuf);
3677*5113495bSYour Name 		return NULL;
3678*5113495bSYour Name 	}
3679*5113495bSYour Name 
3680*5113495bSYour Name 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
3681*5113495bSYour Name 
3682*5113495bSYour Name 	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
3683*5113495bSYour Name 		   " tid %d to_fw %d",
3684*5113495bSYour Name 		   msdu_info->meta_data[0],
3685*5113495bSYour Name 		   msdu_info->meta_data[1],
3686*5113495bSYour Name 		   msdu_info->meta_data[2],
3687*5113495bSYour Name 		   msdu_info->meta_data[3],
3688*5113495bSYour Name 		   msdu_info->meta_data[4],
3689*5113495bSYour Name 		   msdu_info->meta_data[5],
3690*5113495bSYour Name 		   msdu_info->tid, msdu_info->exception_fw);
3691*5113495bSYour Name 
3692*5113495bSYour Name 	return nbuf;
3693*5113495bSYour Name }
3694*5113495bSYour Name #else
3695*5113495bSYour Name static
3696*5113495bSYour Name qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
3697*5113495bSYour Name 				struct dp_tx_msdu_info_s *msdu_info)
3698*5113495bSYour Name {
3699*5113495bSYour Name 	return nbuf;
3700*5113495bSYour Name }
3701*5113495bSYour Name 
3702*5113495bSYour Name #endif
3703*5113495bSYour Name 
3704*5113495bSYour Name /**
3705*5113495bSYour Name  * dp_check_exc_metadata() - Checks if parameters are valid
3706*5113495bSYour Name  * @tx_exc: holds all exception path parameters
3707*5113495bSYour Name  *
3708*5113495bSYour Name  * Return: true when all the parameters are valid else false
3709*5113495bSYour Name  *
3710*5113495bSYour Name  */
3711*5113495bSYour Name static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
3712*5113495bSYour Name {
3713*5113495bSYour Name 	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
3714*5113495bSYour Name 			    HTT_INVALID_TID);
3715*5113495bSYour Name 	bool invalid_encap_type =
3716*5113495bSYour Name 			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
3717*5113495bSYour Name 			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
3718*5113495bSYour Name 	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
3719*5113495bSYour Name 				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
3720*5113495bSYour Name 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
3721*5113495bSYour Name 			       tx_exc->ppdu_cookie == 0);
3722*5113495bSYour Name 
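	/* Intra-BSS forwarded frames are accepted without further checks */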
3723*5113495bSYour Name 	if (tx_exc->is_intrabss_fwd)
3724*5113495bSYour Name 		return true;
3725*5113495bSYour Name 
3726*5113495bSYour Name 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
3727*5113495bSYour Name 	    invalid_cookie) {
3728*5113495bSYour Name 		return false;
3729*5113495bSYour Name 	}
3730*5113495bSYour Name 
3731*5113495bSYour Name 	return true;
3732*5113495bSYour Name }
3733*5113495bSYour Name 
3734*5113495bSYour Name #ifdef ATH_SUPPORT_IQUE
3735*5113495bSYour Name bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3736*5113495bSYour Name {
3737*5113495bSYour Name 	qdf_ether_header_t *eh;
3738*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
3739*5113495bSYour Name 	/* Mcast to Ucast Conversion*/
3740*5113495bSYour Name 	if (qdf_likely(!vdev->mcast_enhancement_en))
3741*5113495bSYour Name 		return true;
3742*5113495bSYour Name 
3743*5113495bSYour Name 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3744*5113495bSYour Name 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
3745*5113495bSYour Name 	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
3746*5113495bSYour Name 		dp_verbose_debug("Mcast frm for ME %pK", vdev);
3747*5113495bSYour Name 		qdf_nbuf_set_next(nbuf, NULL);
3748*5113495bSYour Name 
3749*5113495bSYour Name 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].mcast_en.mcast_pkt, 1,
3750*5113495bSYour Name 				 qdf_nbuf_len(nbuf));
3751*5113495bSYour Name 		if (dp_tx_prepare_send_me(vdev, nbuf) ==
3752*5113495bSYour Name 				QDF_STATUS_SUCCESS) {
3753*5113495bSYour Name 			return false;
3754*5113495bSYour Name 		}
3755*5113495bSYour Name 
3756*5113495bSYour Name 		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
3757*5113495bSYour Name 			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
3758*5113495bSYour Name 					QDF_STATUS_SUCCESS) {
3759*5113495bSYour Name 				return false;
3760*5113495bSYour Name 			}
3761*5113495bSYour Name 		}
3762*5113495bSYour Name 	}
3763*5113495bSYour Name 
3764*5113495bSYour Name 	return true;
3765*5113495bSYour Name }
3766*5113495bSYour Name #else
3767*5113495bSYour Name bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3768*5113495bSYour Name {
3769*5113495bSYour Name 	return true;
3770*5113495bSYour Name }
3771*5113495bSYour Name #endif
3772*5113495bSYour Name 
3773*5113495bSYour Name #ifdef QCA_SUPPORT_WDS_EXTENDED
3774*5113495bSYour Name /**
3775*5113495bSYour Name  * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
3776*5113495bSYour Name  * @vdev: vdev handle
3777*5113495bSYour Name  * @nbuf: skb
3778*5113495bSYour Name  *
3779*5113495bSYour Name  * Return: true if frame is dropped, false otherwise
3780*5113495bSYour Name  */
3781*5113495bSYour Name static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3782*5113495bSYour Name {
3783*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
3784*5113495bSYour Name 
3785*5113495bSYour Name 	/* Drop tx mcast if drop_tx_mcast is set and WDS Extended is enabled */
3786*5113495bSYour Name 	if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
3787*5113495bSYour Name 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
3788*5113495bSYour Name 						qdf_nbuf_data(nbuf);
3789*5113495bSYour Name 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
3790*5113495bSYour Name 			DP_STATS_INC(vdev,
3791*5113495bSYour Name 				     tx_i[xmit_type].dropped.tx_mcast_drop, 1);
3792*5113495bSYour Name 			return true;
3793*5113495bSYour Name 		}
3794*5113495bSYour Name 	}
3795*5113495bSYour Name 
3796*5113495bSYour Name 	return false;
3797*5113495bSYour Name }
3798*5113495bSYour Name #else
3799*5113495bSYour Name static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
3800*5113495bSYour Name {
3801*5113495bSYour Name 	return false;
3802*5113495bSYour Name }
3803*5113495bSYour Name #endif
3804*5113495bSYour Name /**
3805*5113495bSYour Name  * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
3806*5113495bSYour Name  * @nbuf: qdf_nbuf_t
3807*5113495bSYour Name  * @vdev: struct dp_vdev *
3808*5113495bSYour Name  *
3809*5113495bSYour Name  * Allow the packet to be processed only if it is destined to a peer
3810*5113495bSYour Name  * client connected to the same vap. Drop the packet if the client is
3811*5113495bSYour Name  * connected to a different vap.
3812*5113495bSYour Name  *
3813*5113495bSYour Name  * Return: QDF_STATUS
3814*5113495bSYour Name  */
3815*5113495bSYour Name static inline QDF_STATUS
3816*5113495bSYour Name dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
3817*5113495bSYour Name {
3818*5113495bSYour Name 	struct dp_ast_entry *dst_ast_entry = NULL;
3819*5113495bSYour Name 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
3820*5113495bSYour Name 
3821*5113495bSYour Name 	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
3822*5113495bSYour Name 	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
3823*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
3824*5113495bSYour Name 
3825*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
3826*5113495bSYour Name 	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
3827*5113495bSYour Name 							eh->ether_dhost,
3828*5113495bSYour Name 							vdev->vdev_id);
3829*5113495bSYour Name 
3830*5113495bSYour Name 	/* If there is no ast entry, return failure */
3831*5113495bSYour Name 	if (qdf_unlikely(!dst_ast_entry)) {
3832*5113495bSYour Name 		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3833*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
3834*5113495bSYour Name 	}
3835*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
3836*5113495bSYour Name 
3837*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
3838*5113495bSYour Name }
3839*5113495bSYour Name 
3840*5113495bSYour Name /**
3841*5113495bSYour Name  * dp_tx_nawds_handler() - NAWDS handler
3842*5113495bSYour Name  *
3843*5113495bSYour Name  * @soc: DP soc handle
3844*5113495bSYour Name  * @vdev: DP vdev handle
3845*5113495bSYour Name  * @msdu_info: msdu_info required to create HTT metadata
3846*5113495bSYour Name  * @nbuf: skb
3847*5113495bSYour Name  * @sa_peer_id: peer id of the source peer of the frame
3848*5113495bSYour Name  *
3849*5113495bSYour Name  * This API transmits multicast frames by cloning the frame and sending
3850*5113495bSYour Name  * it with the peer id of each NAWDS-enabled peer.
3851*5113495bSYour Name  *
3852*5113495bSYour Name  * Return: none
3853*5113495bSYour Name  */
3854*5113495bSYour Name 
3855*5113495bSYour Name void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
3856*5113495bSYour Name 			 struct dp_tx_msdu_info_s *msdu_info,
3857*5113495bSYour Name 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
3858*5113495bSYour Name {
3859*5113495bSYour Name 	struct dp_peer *peer = NULL;
3860*5113495bSYour Name 	qdf_nbuf_t nbuf_clone = NULL;
3861*5113495bSYour Name 	uint16_t peer_id = DP_INVALID_PEER;
3862*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
3863*5113495bSYour Name 	uint8_t link_id = 0;
3864*5113495bSYour Name 
3865*5113495bSYour Name 	/* This check avoids forwarding a packet whose source is present
3866*5113495bSYour Name 	 * in the AST table but does not yet have a valid peer id.
3867*5113495bSYour Name 	 */
3868*5113495bSYour Name 	if (sa_peer_id == HTT_INVALID_PEER)
3869*5113495bSYour Name 		return;
3870*5113495bSYour Name 
3871*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->peer_list_lock);
3872*5113495bSYour Name 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
3873*5113495bSYour Name 		txrx_peer = dp_get_txrx_peer(peer);
3874*5113495bSYour Name 		if (!txrx_peer)
3875*5113495bSYour Name 			continue;
3876*5113495bSYour Name 
3877*5113495bSYour Name 		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
3878*5113495bSYour Name 			peer_id = peer->peer_id;
3879*5113495bSYour Name 
3880*5113495bSYour Name 			if (!dp_peer_is_primary_link_peer(peer))
3881*5113495bSYour Name 				continue;
3882*5113495bSYour Name 
3883*5113495bSYour Name 			/* In the case of a WDS ext peer, mcast traffic is
3884*5113495bSYour Name 			 * sent via the VLAN interface
3885*5113495bSYour Name 			 */
3886*5113495bSYour Name 			if (dp_peer_is_wds_ext_peer(txrx_peer))
3887*5113495bSYour Name 				continue;
3888*5113495bSYour Name 
3889*5113495bSYour Name 			/* Multicast packets need to be
3890*5113495bSYour Name 			 * dropped in case of intra-bss forwarding
3891*5113495bSYour Name 			 */
3892*5113495bSYour Name 			if (sa_peer_id == txrx_peer->peer_id) {
3893*5113495bSYour Name 				dp_tx_debug("multicast packet");
3894*5113495bSYour Name 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3895*5113495bSYour Name 							  tx.nawds_mcast_drop,
3896*5113495bSYour Name 							  1, link_id);
3897*5113495bSYour Name 				continue;
3898*5113495bSYour Name 			}
3899*5113495bSYour Name 
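			/* Clone the frame for this peer; the original nbuf
			 * is retained for the remaining peers in the list.
			 */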
3900*5113495bSYour Name 			nbuf_clone = qdf_nbuf_clone(nbuf);
3901*5113495bSYour Name 
3902*5113495bSYour Name 			if (!nbuf_clone) {
3903*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_DP,
3904*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
3905*5113495bSYour Name 					  FL("nbuf clone failed"));
3906*5113495bSYour Name 				break;
3907*5113495bSYour Name 			}
3908*5113495bSYour Name 
3909*5113495bSYour Name 			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
3910*5113495bSYour Name 							    msdu_info, peer_id,
3911*5113495bSYour Name 							    NULL);
3912*5113495bSYour Name 
3913*5113495bSYour Name 			if (nbuf_clone) {
3914*5113495bSYour Name 				dp_tx_debug("pkt send failed");
3915*5113495bSYour Name 				qdf_nbuf_free(nbuf_clone);
3916*5113495bSYour Name 			} else {
3917*5113495bSYour Name 				if (peer_id != DP_INVALID_PEER)
3918*5113495bSYour Name 					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
3919*5113495bSYour Name 								      tx.nawds_mcast,
3920*5113495bSYour Name 								      1, qdf_nbuf_len(nbuf), link_id);
3921*5113495bSYour Name 			}
3922*5113495bSYour Name 		}
3923*5113495bSYour Name 	}
3924*5113495bSYour Name 
3925*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
3926*5113495bSYour Name }
3927*5113495bSYour Name 
3928*5113495bSYour Name #ifdef WLAN_MCAST_MLO
3929*5113495bSYour Name static inline bool
3930*5113495bSYour Name dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
3931*5113495bSYour Name 		      struct cdp_tx_exception_metadata *tx_exc_metadata)
3932*5113495bSYour Name {
3933*5113495bSYour Name 	if (!tx_exc_metadata->is_mlo_mcast && qdf_unlikely(vdev->mesh_vdev))
3934*5113495bSYour Name 		return true;
3935*5113495bSYour Name 
3936*5113495bSYour Name 	return false;
3937*5113495bSYour Name }
3938*5113495bSYour Name #else
3939*5113495bSYour Name static inline bool
3940*5113495bSYour Name dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
3941*5113495bSYour Name 		      struct cdp_tx_exception_metadata *tx_exc_metadata)
3942*5113495bSYour Name {
3943*5113495bSYour Name 	if (qdf_unlikely(vdev->mesh_vdev))
3944*5113495bSYour Name 		return true;
3945*5113495bSYour Name 
3946*5113495bSYour Name 	return false;
3947*5113495bSYour Name }
3948*5113495bSYour Name #endif
3949*5113495bSYour Name 
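/*
 * Illustrative usage sketch (not taken from a real caller): a caller of
 * dp_tx_send_exception() fills a cdp_tx_exception_metadata with the
 * fields validated in dp_check_exc_metadata() above, using the
 * "invalid" sentinels for anything it does not want to override:
 *
 *	struct cdp_tx_exception_metadata meta = {0};
 *
 *	meta.peer_id = CDP_INVALID_PEER;
 *	meta.tid = HTT_INVALID_TID;
 *	meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	meta.sec_type = CDP_INVALID_SEC_TYPE;
 *	nbuf = dp_tx_send_exception(soc_hdl, vdev_id, nbuf, &meta);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);	(non-NULL return: send failed)
 */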
3950*5113495bSYour Name qdf_nbuf_t
3951*5113495bSYour Name dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3952*5113495bSYour Name 		     qdf_nbuf_t nbuf,
3953*5113495bSYour Name 		     struct cdp_tx_exception_metadata *tx_exc_metadata)
3954*5113495bSYour Name {
3955*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3956*5113495bSYour Name 	struct dp_tx_msdu_info_s msdu_info;
3957*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
3958*5113495bSYour Name 						     DP_MOD_ID_TX_EXCEPTION);
3959*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
3960*5113495bSYour Name 
3961*5113495bSYour Name 	if (qdf_unlikely(!vdev))
3962*5113495bSYour Name 		goto fail;
3963*5113495bSYour Name 
3964*5113495bSYour Name 	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
3965*5113495bSYour Name 
3966*5113495bSYour Name 	if (!tx_exc_metadata)
3967*5113495bSYour Name 		goto fail;
3968*5113495bSYour Name 
3969*5113495bSYour Name 	msdu_info.tid = tx_exc_metadata->tid;
3970*5113495bSYour Name 	msdu_info.xmit_type = xmit_type;
3971*5113495bSYour Name 	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
3972*5113495bSYour Name 			 QDF_MAC_ADDR_REF(nbuf->data));
3973*5113495bSYour Name 
3974*5113495bSYour Name 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, qdf_nbuf_len(nbuf));
3975*5113495bSYour Name 
3976*5113495bSYour Name 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
3977*5113495bSYour Name 		dp_tx_err("Invalid parameters in exception path");
3978*5113495bSYour Name 		goto fail;
3979*5113495bSYour Name 	}
3980*5113495bSYour Name 
3981*5113495bSYour Name 	/* for peer based metadata check if peer is valid */
3982*5113495bSYour Name 	if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
3983*5113495bSYour Name 		struct dp_peer *peer = NULL;
3984*5113495bSYour Name 
3985*5113495bSYour Name 		 peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
3986*5113495bSYour Name 					      tx_exc_metadata->peer_id,
3987*5113495bSYour Name 					      DP_MOD_ID_TX_EXCEPTION);
3988*5113495bSYour Name 		if (qdf_unlikely(!peer)) {
3989*5113495bSYour Name 			DP_STATS_INC(vdev,
3990*5113495bSYour Name 			     tx_i[xmit_type].dropped.invalid_peer_id_in_exc_path,
3991*5113495bSYour Name 			     1);
3992*5113495bSYour Name 			goto fail;
3993*5113495bSYour Name 		}
3994*5113495bSYour Name 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
3995*5113495bSYour Name 	}
3996*5113495bSYour Name 	/* Basic sanity checks for unsupported packets */
3997*5113495bSYour Name 
3998*5113495bSYour Name 	/* MESH mode */
3999*5113495bSYour Name 	if (dp_tx_check_mesh_vdev(vdev, tx_exc_metadata)) {
4000*5113495bSYour Name 		dp_tx_err("Mesh mode is not supported in exception path");
4001*5113495bSYour Name 		goto fail;
4002*5113495bSYour Name 	}
4003*5113495bSYour Name 
4004*5113495bSYour Name 	/*
4005*5113495bSYour Name 	 * Classify the frame and call corresponding
4006*5113495bSYour Name 	 * "prepare" function which extracts the segment (TSO)
4007*5113495bSYour Name 	 * and fragmentation information (for TSO , SG, ME, or Raw)
4008*5113495bSYour Name 	 * into MSDU_INFO structure which is later used to fill
4009*5113495bSYour Name 	 * SW and HW descriptors.
4010*5113495bSYour Name 	 */
4011*5113495bSYour Name 	if (qdf_nbuf_is_tso(nbuf)) {
4012*5113495bSYour Name 		dp_verbose_debug("TSO frame %pK", vdev);
4013*5113495bSYour Name 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
4014*5113495bSYour Name 				 qdf_nbuf_len(nbuf));
4015*5113495bSYour Name 
4016*5113495bSYour Name 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
4017*5113495bSYour Name 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
4018*5113495bSYour Name 					 qdf_nbuf_len(nbuf));
4019*5113495bSYour Name 			goto fail;
4020*5113495bSYour Name 		}
4021*5113495bSYour Name 
4022*5113495bSYour Name 		DP_STATS_INC(vdev,
4023*5113495bSYour Name 			     tx_i[xmit_type].rcvd.num, msdu_info.num_seg - 1);
4024*5113495bSYour Name 
4025*5113495bSYour Name 		goto send_multiple;
4026*5113495bSYour Name 	}
4027*5113495bSYour Name 
4028*5113495bSYour Name 	/* SG */
4029*5113495bSYour Name 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
4030*5113495bSYour Name 		struct dp_tx_seg_info_s seg_info = {0};
4031*5113495bSYour Name 
4032*5113495bSYour Name 		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
4033*5113495bSYour Name 		if (!nbuf)
4034*5113495bSYour Name 			goto fail;
4035*5113495bSYour Name 
4036*5113495bSYour Name 		dp_verbose_debug("non-TSO SG frame %pK", vdev);
4037*5113495bSYour Name 
4038*5113495bSYour Name 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sg.sg_pkt, 1,
4039*5113495bSYour Name 				 qdf_nbuf_len(nbuf));
4040*5113495bSYour Name 
4041*5113495bSYour Name 		goto send_multiple;
4042*5113495bSYour Name 	}
4043*5113495bSYour Name 
4044*5113495bSYour Name 	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
4045*5113495bSYour Name 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sniffer_rcvd, 1,
4046*5113495bSYour Name 				 qdf_nbuf_len(nbuf));
4047*5113495bSYour Name 
4048*5113495bSYour Name 		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
4049*5113495bSYour Name 					       tx_exc_metadata->ppdu_cookie);
4050*5113495bSYour Name 	}
4051*5113495bSYour Name 
4052*5113495bSYour Name 	/*
4053*5113495bSYour Name 	 * Get HW Queue to use for this frame.
4054*5113495bSYour Name 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
4055*5113495bSYour Name 	 * dedicated for data and 1 for command.
4056*5113495bSYour Name 	 * "queue_id" maps to one hardware ring.
4057*5113495bSYour Name 	 *  With each ring, we also associate a unique Tx descriptor pool
4058*5113495bSYour Name 	 *  to minimize lock contention for these resources.
4059*5113495bSYour Name 	 */
4060*5113495bSYour Name 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
4061*5113495bSYour Name 	DP_STATS_INC(vdev,
4062*5113495bSYour Name 		     tx_i[xmit_type].rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
4063*5113495bSYour Name 		     1);
4064*5113495bSYour Name 
4065*5113495bSYour Name 	/*
4066*5113495bSYour Name 	 * If the packet is an mcast packet, send it through the mlo_mcast
4067*5113495bSYour Name 	 * handler for all partner vdevs
4068*5113495bSYour Name 	 */
4069*5113495bSYour Name 
4070*5113495bSYour Name 	if (soc->arch_ops.dp_tx_mlo_mcast_send) {
4071*5113495bSYour Name 		nbuf = soc->arch_ops.dp_tx_mlo_mcast_send(soc, vdev,
4072*5113495bSYour Name 							  nbuf,
4073*5113495bSYour Name 							  tx_exc_metadata);
4074*5113495bSYour Name 		if (!nbuf)
4075*5113495bSYour Name 			goto fail;
4076*5113495bSYour Name 	}
4077*5113495bSYour Name 
4078*5113495bSYour Name 	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
4079*5113495bSYour Name 		if (qdf_unlikely(vdev->nawds_enabled)) {
4080*5113495bSYour Name 			/*
4081*5113495bSYour Name 			 * This is a multicast packet
4082*5113495bSYour Name 			 */
4083*5113495bSYour Name 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
4084*5113495bSYour Name 					    tx_exc_metadata->peer_id);
4085*5113495bSYour Name 			DP_STATS_INC_PKT(vdev, tx_i[xmit_type].nawds_mcast,
4086*5113495bSYour Name 					 1, qdf_nbuf_len(nbuf));
4087*5113495bSYour Name 		}
4088*5113495bSYour Name 
4089*5113495bSYour Name 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
4090*5113495bSYour Name 					      DP_INVALID_PEER, NULL);
4091*5113495bSYour Name 	} else {
4092*5113495bSYour Name 		/*
4093*5113495bSYour Name 		 * Check exception descriptors
4094*5113495bSYour Name 		 */
4095*5113495bSYour Name 		if (dp_tx_exception_limit_check(vdev, xmit_type))
4096*5113495bSYour Name 			goto fail;
4097*5113495bSYour Name 
4098*5113495bSYour Name 		/*  Single linear frame */
4099*5113495bSYour Name 		/*
4100*5113495bSYour Name 		 * If nbuf is a simple linear frame, use send_single function to
4101*5113495bSYour Name 		 * prepare direct-buffer type TCL descriptor and enqueue to TCL
4102*5113495bSYour Name 		 * SRNG. There is no need to setup a MSDU extension descriptor.
4103*5113495bSYour Name 		 */
4104*5113495bSYour Name 		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
4105*5113495bSYour Name 					      tx_exc_metadata->peer_id,
4106*5113495bSYour Name 					      tx_exc_metadata);
4107*5113495bSYour Name 	}
4108*5113495bSYour Name 
4109*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4110*5113495bSYour Name 	return nbuf;
4111*5113495bSYour Name 
4112*5113495bSYour Name send_multiple:
4113*5113495bSYour Name 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4114*5113495bSYour Name 
4115*5113495bSYour Name fail:
4116*5113495bSYour Name 	if (vdev)
4117*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4118*5113495bSYour Name 	dp_verbose_debug("pkt send failed");
4119*5113495bSYour Name 	return nbuf;
4120*5113495bSYour Name }
4121*5113495bSYour Name 
4122*5113495bSYour Name qdf_nbuf_t
4123*5113495bSYour Name dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
4124*5113495bSYour Name 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
4125*5113495bSYour Name 				   struct cdp_tx_exception_metadata *tx_exc_metadata)
4126*5113495bSYour Name {
4127*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4128*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4129*5113495bSYour Name 						     DP_MOD_ID_TX_EXCEPTION);
4130*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4131*5113495bSYour Name 
4132*5113495bSYour Name 	if (qdf_unlikely(!vdev))
4133*5113495bSYour Name 		goto fail;
4134*5113495bSYour Name 
4135*5113495bSYour Name 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
4136*5113495bSYour Name 			== QDF_STATUS_E_FAILURE)) {
4137*5113495bSYour Name 		DP_STATS_INC(vdev,
4138*5113495bSYour Name 			     tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
4139*5113495bSYour Name 			     1);
4140*5113495bSYour Name 		goto fail;
4141*5113495bSYour Name 	}
4142*5113495bSYour Name 
4143*5113495bSYour Name 	/* Unref count as it will again be taken inside dp_tx_exception */
4144*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4145*5113495bSYour Name 
4146*5113495bSYour Name 	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
4147*5113495bSYour Name 
4148*5113495bSYour Name fail:
4149*5113495bSYour Name 	if (vdev)
4150*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
4151*5113495bSYour Name 	dp_verbose_debug("pkt send failed");
4152*5113495bSYour Name 	return nbuf;
4153*5113495bSYour Name }
4154*5113495bSYour Name 
4155*5113495bSYour Name #ifdef MESH_MODE_SUPPORT
4156*5113495bSYour Name qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4157*5113495bSYour Name 			   qdf_nbuf_t nbuf)
4158*5113495bSYour Name {
4159*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4160*5113495bSYour Name 	struct meta_hdr_s *mhdr;
4161*5113495bSYour Name 	qdf_nbuf_t nbuf_mesh = NULL;
4162*5113495bSYour Name 	qdf_nbuf_t nbuf_clone = NULL;
4163*5113495bSYour Name 	struct dp_vdev *vdev;
4164*5113495bSYour Name 	uint8_t no_enc_frame = 0;
4165*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4166*5113495bSYour Name 
4167*5113495bSYour Name 	nbuf_mesh = qdf_nbuf_unshare(nbuf);
4168*5113495bSYour Name 	if (!nbuf_mesh) {
4169*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4170*5113495bSYour Name 				"qdf_nbuf_unshare failed");
4171*5113495bSYour Name 		return nbuf;
4172*5113495bSYour Name 	}
4173*5113495bSYour Name 
4174*5113495bSYour Name 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
4175*5113495bSYour Name 	if (!vdev) {
4176*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4177*5113495bSYour Name 				"vdev is NULL for vdev_id %d", vdev_id);
4178*5113495bSYour Name 		return nbuf;
4179*5113495bSYour Name 	}
4180*5113495bSYour Name 
4181*5113495bSYour Name 	nbuf = nbuf_mesh;
4182*5113495bSYour Name 
4183*5113495bSYour Name 	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
4184*5113495bSYour Name 
4185*5113495bSYour Name 	if ((vdev->sec_type != cdp_sec_type_none) &&
4186*5113495bSYour Name 			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
4187*5113495bSYour Name 		no_enc_frame = 1;
4188*5113495bSYour Name 
4189*5113495bSYour Name 	if (mhdr->flags & METAHDR_FLAG_NOQOS)
4190*5113495bSYour Name 		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
4191*5113495bSYour Name 
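	/* If the meta hdr info was updated and the frame will be encrypted,
	 * additionally send a clone tagged with the mesh tx info to the FW
	 * (counted under mesh.exception_fw).
	 */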
4192*5113495bSYour Name 	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
4193*5113495bSYour Name 		       !no_enc_frame) {
4194*5113495bSYour Name 		nbuf_clone = qdf_nbuf_clone(nbuf);
4195*5113495bSYour Name 		if (!nbuf_clone) {
4196*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4197*5113495bSYour Name 				"qdf_nbuf_clone failed");
4198*5113495bSYour Name 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
4199*5113495bSYour Name 			return nbuf;
4200*5113495bSYour Name 		}
4201*5113495bSYour Name 		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
4202*5113495bSYour Name 	}
4203*5113495bSYour Name 
4204*5113495bSYour Name 	if (nbuf_clone) {
4205*5113495bSYour Name 		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
4206*5113495bSYour Name 			DP_STATS_INC(vdev, tx_i[xmit_type].mesh.exception_fw,
4207*5113495bSYour Name 				     1);
4208*5113495bSYour Name 		} else {
4209*5113495bSYour Name 			qdf_nbuf_free(nbuf_clone);
4210*5113495bSYour Name 		}
4211*5113495bSYour Name 	}
4212*5113495bSYour Name 
4213*5113495bSYour Name 	if (no_enc_frame)
4214*5113495bSYour Name 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
4215*5113495bSYour Name 	else
4216*5113495bSYour Name 		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
4217*5113495bSYour Name 
4218*5113495bSYour Name 	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
4219*5113495bSYour Name 	if ((!nbuf) && no_enc_frame) {
4220*5113495bSYour Name 		DP_STATS_INC(vdev, tx_i[xmit_type].mesh.exception_fw, 1);
4221*5113495bSYour Name 	}
4222*5113495bSYour Name 
4223*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
4224*5113495bSYour Name 	return nbuf;
4225*5113495bSYour Name }
4226*5113495bSYour Name 
4227*5113495bSYour Name #else
4228*5113495bSYour Name 
4229*5113495bSYour Name qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4230*5113495bSYour Name 			   qdf_nbuf_t nbuf)
4231*5113495bSYour Name {
4232*5113495bSYour Name 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
4233*5113495bSYour Name }
4234*5113495bSYour Name 
4235*5113495bSYour Name #endif
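
/*
 * Illustrative sketch (not upstream code): how an OS shim might build the
 * meta_hdr_s that dp_tx_send_mesh() parses above. The function name
 * osif_mesh_xmit is a hypothetical placeholder for illustration only.
 *
 *     static qdf_nbuf_t osif_mesh_xmit(struct cdp_soc_t *soc, uint8_t vdev_id,
 *                                      qdf_nbuf_t nbuf)
 *     {
 *         struct meta_hdr_s *mhdr;
 *
 *         mhdr = (struct meta_hdr_s *)qdf_nbuf_push_head(nbuf, sizeof(*mhdr));
 *         if (!mhdr)
 *             return nbuf;               - not consumed, caller frees
 *         qdf_mem_zero(mhdr, sizeof(*mhdr));
 *         mhdr->flags = METAHDR_FLAG_INFO_UPDATED;  - request per-pkt TX info
 *         return dp_tx_send_mesh(soc, vdev_id, nbuf);
 *     }
 */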
4236*5113495bSYour Name 
4237*5113495bSYour Name #ifdef DP_UMAC_HW_RESET_SUPPORT
4238*5113495bSYour Name qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4239*5113495bSYour Name 		      qdf_nbuf_t nbuf)
4240*5113495bSYour Name {
4241*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4242*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
4243*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4244*5113495bSYour Name 
4245*5113495bSYour Name 	vdev = soc->vdev_id_map[vdev_id];
4246*5113495bSYour Name 	if (qdf_unlikely(!vdev))
4247*5113495bSYour Name 		return nbuf;
4248*5113495bSYour Name 
4249*5113495bSYour Name 	DP_STATS_INC(vdev, tx_i[xmit_type].dropped.drop_ingress, 1);
4250*5113495bSYour Name 	return nbuf;
4251*5113495bSYour Name }
4252*5113495bSYour Name 
4253*5113495bSYour Name qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4254*5113495bSYour Name 			  qdf_nbuf_t nbuf,
4255*5113495bSYour Name 			  struct cdp_tx_exception_metadata *tx_exc_metadata)
4256*5113495bSYour Name {
4257*5113495bSYour Name 	return dp_tx_drop(soc_hdl, vdev_id, nbuf);
4258*5113495bSYour Name }
4259*5113495bSYour Name #endif
4260*5113495bSYour Name 
4261*5113495bSYour Name #ifdef FEATURE_DIRECT_LINK
4262*5113495bSYour Name /**
4263*5113495bSYour Name  * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet
4264*5113495bSYour Name  * @nbuf: skb
4265*5113495bSYour Name  * @vdev: DP vdev handle
4266*5113495bSYour Name  *
4267*5113495bSYour Name  * Return: None
4268*5113495bSYour Name  */
4269*5113495bSYour Name static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
4270*5113495bSYour Name {
4271*5113495bSYour Name 	if (qdf_unlikely(vdev->to_fw))
4272*5113495bSYour Name 		QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
4273*5113495bSYour Name }
4274*5113495bSYour Name #else
4275*5113495bSYour Name static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
4276*5113495bSYour Name {
4277*5113495bSYour Name }
4278*5113495bSYour Name #endif
4279*5113495bSYour Name 
4280*5113495bSYour Name qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4281*5113495bSYour Name 		      qdf_nbuf_t nbuf)
4282*5113495bSYour Name {
4283*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4284*5113495bSYour Name 	uint16_t peer_id = HTT_INVALID_PEER;
4285*5113495bSYour Name 	/*
4286*5113495bSYour Name 	 * A memzero here would add function-call overhead in the fast
4287*5113495bSYour Name 	 * path, so rely on static stack initialization instead.
4288*5113495bSYour Name 	 */
4289*5113495bSYour Name 	struct dp_tx_msdu_info_s msdu_info = {0};
4290*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
4291*5113495bSYour Name 	qdf_nbuf_t end_nbuf = NULL;
4292*5113495bSYour Name 	uint8_t xmit_type;
4293*5113495bSYour Name 
4294*5113495bSYour Name 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4295*5113495bSYour Name 		return nbuf;
4296*5113495bSYour Name 
4297*5113495bSYour Name 	/*
4298*5113495bSYour Name 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
4299*5113495bSYour Name 	 * it in the per-packet path.
4300*5113495bSYour Name 	 *
4301*5113495bSYour Name 	 * In this path the vdev memory is already protected by the netdev
4302*5113495bSYour Name 	 * tx lock.
4303*5113495bSYour Name 	 */
4304*5113495bSYour Name 	vdev = soc->vdev_id_map[vdev_id];
4305*5113495bSYour Name 	if (qdf_unlikely(!vdev))
4306*5113495bSYour Name 		return nbuf;
4307*5113495bSYour Name 
4308*5113495bSYour Name 	dp_tx_get_driver_ingress_ts(vdev, &msdu_info, nbuf);
4309*5113495bSYour Name 
4310*5113495bSYour Name 	dp_vdev_tx_mark_to_fw(nbuf, vdev);
4311*5113495bSYour Name 
4312*5113495bSYour Name 	/*
4313*5113495bSYour Name 	 * Set Default Host TID value to invalid TID
4314*5113495bSYour Name 	 * (TID override disabled)
4315*5113495bSYour Name 	 */
4316*5113495bSYour Name 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
4317*5113495bSYour Name 	xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4318*5113495bSYour Name 	msdu_info.xmit_type = xmit_type;
4319*5113495bSYour Name 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, qdf_nbuf_len(nbuf));
4320*5113495bSYour Name 
4321*5113495bSYour Name 	if (qdf_unlikely(vdev->mesh_vdev)) {
4322*5113495bSYour Name 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
4323*5113495bSYour Name 								&msdu_info);
4324*5113495bSYour Name 		if (!nbuf_mesh) {
4325*5113495bSYour Name 			dp_verbose_debug("Extracting mesh metadata failed");
4326*5113495bSYour Name 			return nbuf;
4327*5113495bSYour Name 		}
4328*5113495bSYour Name 		nbuf = nbuf_mesh;
4329*5113495bSYour Name 	}
4330*5113495bSYour Name 
4331*5113495bSYour Name 	/*
4332*5113495bSYour Name 	 * Get HW Queue to use for this frame.
4333*5113495bSYour Name 	 * TCL supports up to 4 DMA rings, out of which 3 rings are
4334*5113495bSYour Name 	 * dedicated for data and 1 for command.
4335*5113495bSYour Name 	 * "queue_id" maps to one hardware ring.
4336*5113495bSYour Name 	 *  With each ring, we also associate a unique Tx descriptor pool
4337*5113495bSYour Name 	 *  to minimize lock contention for these resources.
4338*5113495bSYour Name 	 */
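	/*
	 * A minimal sketch of that mapping, assuming 3 data rings and a 1:1
	 * ring-to-descriptor-pool pairing (the real dp_tx_get_queue() is
	 * target-specific; the helper name below is illustrative only):
	 *
	 *     static inline void example_tx_get_queue(qdf_nbuf_t nbuf,
	 *                                             struct dp_tx_queue *queue)
	 *     {
	 *         uint8_t ring = qdf_nbuf_get_queue_mapping(nbuf) % 3;
	 *
	 *         queue->ring_id = ring;         - one of the 3 data rings
	 *         queue->desc_pool_id = ring;    - pool dedicated to that ring
	 *     }
	 */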
4339*5113495bSYour Name 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
4340*5113495bSYour Name 	DP_STATS_INC(vdev,
4341*5113495bSYour Name 		     tx_i[xmit_type].rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
4342*5113495bSYour Name 		     1);
4343*5113495bSYour Name 
4344*5113495bSYour Name 	/*
4345*5113495bSYour Name 	 * TCL H/W supports 2 DSCP-TID mapping tables.
4346*5113495bSYour Name 	 *  Table 1 - Default DSCP-TID mapping table
4347*5113495bSYour Name 	 *  Table 2 - DSCP-TID override table
4348*5113495bSYour Name 	 *
4349*5113495bSYour Name 	 * If we need a different DSCP-TID mapping for this VAP,
4350*5113495bSYour Name 	 * call tid_classify to extract DSCP/ToS from frame and
4351*5113495bSYour Name 	 * map to a TID and store in msdu_info. This is later used
4352*5113495bSYour Name 	 * to fill in TCL Input descriptor (per-packet TID override).
4353*5113495bSYour Name 	 */
4354*5113495bSYour Name 	dp_tx_classify_tid(vdev, nbuf, &msdu_info);
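
	/*
	 * Worked example of the override (values illustrative): an IPv4
	 * frame with ToS 0xB8 carries DSCP 46 (EF); a typical DSCP-TID map
	 * would yield TID 6, stored in msdu_info.tid so the TCL descriptor
	 * overrides the default table:
	 *
	 *     dscp = ip_tos >> 2;                   - 0xB8 >> 2 == 46
	 *     msdu_info.tid = dscp_tid_map[dscp];   - e.g. 6 (voice AC)
	 */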
4355*5113495bSYour Name 
4356*5113495bSYour Name 	/*
4357*5113495bSYour Name 	 * Classify the frame and call corresponding
4358*5113495bSYour Name 	 * "prepare" function which extracts the segment (TSO)
4359*5113495bSYour Name 	 * and fragmentation information (for TSO, SG, ME, or Raw)
4360*5113495bSYour Name 	 * into MSDU_INFO structure which is later used to fill
4361*5113495bSYour Name 	 * SW and HW descriptors.
4362*5113495bSYour Name 	 */
4363*5113495bSYour Name 	if (qdf_nbuf_is_tso(nbuf)) {
4364*5113495bSYour Name 		dp_verbose_debug("TSO frame %pK", vdev);
4365*5113495bSYour Name 		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
4366*5113495bSYour Name 				 qdf_nbuf_len(nbuf));
4367*5113495bSYour Name 
4368*5113495bSYour Name 		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
4369*5113495bSYour Name 			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
4370*5113495bSYour Name 					 qdf_nbuf_len(nbuf));
4371*5113495bSYour Name 			return nbuf;
4372*5113495bSYour Name 		}
4373*5113495bSYour Name 
4374*5113495bSYour Name 		DP_STATS_INC(vdev, tx_i[xmit_type].rcvd.num,
4375*5113495bSYour Name 			     msdu_info.num_seg - 1);
4376*5113495bSYour Name 
4377*5113495bSYour Name 		goto send_multiple;
4378*5113495bSYour Name 	}
4379*5113495bSYour Name 
4380*5113495bSYour Name 	/* SG */
4381*5113495bSYour Name 	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
4382*5113495bSYour Name 		if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
4383*5113495bSYour Name 			if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
4384*5113495bSYour Name 				return nbuf;
4385*5113495bSYour Name 		} else {
4386*5113495bSYour Name 			struct dp_tx_seg_info_s seg_info = {0};
4387*5113495bSYour Name 
4388*5113495bSYour Name 			if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
4389*5113495bSYour Name 				goto send_single;
4390*5113495bSYour Name 
4391*5113495bSYour Name 			nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
4392*5113495bSYour Name 						&msdu_info);
4393*5113495bSYour Name 			if (!nbuf)
4394*5113495bSYour Name 				return NULL;
4395*5113495bSYour Name 
4396*5113495bSYour Name 			dp_verbose_debug("non-TSO SG frame %pK", vdev);
4397*5113495bSYour Name 
4398*5113495bSYour Name 			DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sg.sg_pkt, 1,
4399*5113495bSYour Name 					 qdf_nbuf_len(nbuf));
4400*5113495bSYour Name 
4401*5113495bSYour Name 			goto send_multiple;
4402*5113495bSYour Name 		}
4403*5113495bSYour Name 	}
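
	/*
	 * Example: an skb with a handful of page fragments stays on the SG
	 * path above and is described to HW through an MSDU extension
	 * descriptor; one whose nr_frags exceeds DP_TX_MAX_NUM_FRAGS - 1 is
	 * first flattened with qdf_nbuf_linearize() so it can use the
	 * single-buffer path instead.
	 */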
4404*5113495bSYour Name 
4405*5113495bSYour Name 	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
4406*5113495bSYour Name 		return NULL;
4407*5113495bSYour Name 
4408*5113495bSYour Name 	if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
4409*5113495bSYour Name 		return nbuf;
4410*5113495bSYour Name 
4411*5113495bSYour Name 	/* RAW */
4412*5113495bSYour Name 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
4413*5113495bSYour Name 		struct dp_tx_seg_info_s seg_info = {0};
4414*5113495bSYour Name 
4415*5113495bSYour Name 		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
4416*5113495bSYour Name 		if (!nbuf)
4417*5113495bSYour Name 			return NULL;
4418*5113495bSYour Name 
4419*5113495bSYour Name 		dp_verbose_debug("Raw frame %pK", vdev);
4420*5113495bSYour Name 
4421*5113495bSYour Name 		goto send_multiple;
4422*5113495bSYour Name 
4423*5113495bSYour Name 	}
4424*5113495bSYour Name 
4425*5113495bSYour Name 	if (qdf_unlikely(vdev->nawds_enabled)) {
4426*5113495bSYour Name 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
4427*5113495bSYour Name 					  qdf_nbuf_data(nbuf);
4428*5113495bSYour Name 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
4429*5113495bSYour Name 			uint16_t sa_peer_id = DP_INVALID_PEER;
4430*5113495bSYour Name 
4431*5113495bSYour Name 			if (!soc->ast_offload_support) {
4432*5113495bSYour Name 				struct dp_ast_entry *ast_entry = NULL;
4433*5113495bSYour Name 
4434*5113495bSYour Name 				qdf_spin_lock_bh(&soc->ast_lock);
4435*5113495bSYour Name 				ast_entry = dp_peer_ast_hash_find_by_pdevid
4436*5113495bSYour Name 					(soc,
4437*5113495bSYour Name 					 (uint8_t *)(eh->ether_shost),
4438*5113495bSYour Name 					 vdev->pdev->pdev_id);
4439*5113495bSYour Name 				if (ast_entry)
4440*5113495bSYour Name 					sa_peer_id = ast_entry->peer_id;
4441*5113495bSYour Name 				qdf_spin_unlock_bh(&soc->ast_lock);
4442*5113495bSYour Name 			}
4443*5113495bSYour Name 
4444*5113495bSYour Name 			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
4445*5113495bSYour Name 					    sa_peer_id);
4446*5113495bSYour Name 		}
4447*5113495bSYour Name 		peer_id = DP_INVALID_PEER;
4448*5113495bSYour Name 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].nawds_mcast,
4449*5113495bSYour Name 				 1, qdf_nbuf_len(nbuf));
4450*5113495bSYour Name 	}
4451*5113495bSYour Name 
4452*5113495bSYour Name send_single:
4453*5113495bSYour Name 	/*  Single linear frame */
4454*5113495bSYour Name 	/*
4455*5113495bSYour Name 	 * If nbuf is a simple linear frame, use send_single function to
4456*5113495bSYour Name 	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
4457*5113495bSYour Name 	 * SRNG. There is no need to setup a MSDU extension descriptor.
4458*5113495bSYour Name 	 */
4459*5113495bSYour Name 	nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
4460*5113495bSYour Name 					      peer_id, end_nbuf);
4461*5113495bSYour Name 	return nbuf;
4462*5113495bSYour Name 
4463*5113495bSYour Name send_multiple:
4464*5113495bSYour Name 	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
4465*5113495bSYour Name 
4466*5113495bSYour Name 	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
4467*5113495bSYour Name 		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
4468*5113495bSYour Name 
4469*5113495bSYour Name 	return nbuf;
4470*5113495bSYour Name }
4471*5113495bSYour Name 
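/*
 * Calling-convention sketch for dp_tx_send(): a non-NULL return means the
 * nbuf was not consumed and the caller owns it again (the exact caller
 * wiring lives in the cdp layer, not in this file):
 *
 *     qdf_nbuf_t leftover = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *
 *     if (leftover)
 *         qdf_nbuf_free(leftover);
 */
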
4472*5113495bSYour Name qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
4473*5113495bSYour Name 				    uint8_t vdev_id, qdf_nbuf_t nbuf)
4474*5113495bSYour Name {
4475*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4476*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
4477*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4478*5113495bSYour Name 
4479*5113495bSYour Name 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
4480*5113495bSYour Name 		return nbuf;
4481*5113495bSYour Name 
4482*5113495bSYour Name 	/*
4483*5113495bSYour Name 	 * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
4484*5113495bSYour Name 	 * it in the per-packet path.
4485*5113495bSYour Name 	 *
4486*5113495bSYour Name 	 * In this path the vdev memory is already protected by the netdev
4487*5113495bSYour Name 	 * tx lock.
4488*5113495bSYour Name 	 */
4489*5113495bSYour Name 	vdev = soc->vdev_id_map[vdev_id];
4490*5113495bSYour Name 	if (qdf_unlikely(!vdev))
4491*5113495bSYour Name 		return nbuf;
4492*5113495bSYour Name 
4493*5113495bSYour Name 	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
4494*5113495bSYour Name 			== QDF_STATUS_E_FAILURE)) {
4495*5113495bSYour Name 		DP_STATS_INC(vdev,
4496*5113495bSYour Name 			     tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
4497*5113495bSYour Name 			     1);
4498*5113495bSYour Name 		return nbuf;
4499*5113495bSYour Name 	}
4500*5113495bSYour Name 
4501*5113495bSYour Name 	return dp_tx_send(soc_hdl, vdev_id, nbuf);
4502*5113495bSYour Name }
4503*5113495bSYour Name 
4504*5113495bSYour Name #ifdef UMAC_SUPPORT_PROXY_ARP
4505*5113495bSYour Name /**
4506*5113495bSYour Name  * dp_tx_proxy_arp() - Tx proxy arp handler
4507*5113495bSYour Name  * @vdev: datapath vdev handle
4508*5113495bSYour Name  * @nbuf: sk buffer
4509*5113495bSYour Name  *
4510*5113495bSYour Name  * Return: status
4511*5113495bSYour Name  */
4512*5113495bSYour Name int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
4513*5113495bSYour Name {
4514*5113495bSYour Name 	if (vdev->osif_proxy_arp)
4515*5113495bSYour Name 		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
4516*5113495bSYour Name 
4517*5113495bSYour Name 	/*
4518*5113495bSYour Name 	 * When UMAC_SUPPORT_PROXY_ARP is defined, osif_proxy_arp is
4519*5113495bSYour Name 	 * expected to have a valid function pointer assigned to it.
4521*5113495bSYour Name 	 */
4522*5113495bSYour Name 	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
4523*5113495bSYour Name 
4524*5113495bSYour Name 	return QDF_STATUS_NOT_INITIALIZED;
4525*5113495bSYour Name }
4526*5113495bSYour Name #else
4527*5113495bSYour Name int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
4528*5113495bSYour Name {
4529*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
4530*5113495bSYour Name }
4531*5113495bSYour Name #endif
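
/*
 * Registration sketch for the callback consumed above. Assumption: the
 * UMAC layer wires it at vdev attach time; the handler name is
 * hypothetical and shown for illustration only.
 *
 *     static int osif_proxy_arp_handler(ol_osif_vdev_handle osif,
 *                                       qdf_nbuf_t nbuf)
 *     {
 *         - QDF_STATUS_SUCCESS lets the reinjected frame go out;
 *         - any other value suppresses it in dp_tx_reinject_handler()
 *         return QDF_STATUS_SUCCESS;
 *     }
 *
 *     vdev->osif_proxy_arp = osif_proxy_arp_handler;
 */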
4532*5113495bSYour Name 
4533*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
4534*5113495bSYour Name 	!defined(CONFIG_MLO_SINGLE_DEV)
4535*5113495bSYour Name #ifdef WLAN_MCAST_MLO
4536*5113495bSYour Name static bool
4537*5113495bSYour Name dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4538*5113495bSYour Name 		       struct dp_tx_desc_s *tx_desc,
4539*5113495bSYour Name 		       qdf_nbuf_t nbuf,
4540*5113495bSYour Name 		       uint8_t reinject_reason)
4541*5113495bSYour Name {
4542*5113495bSYour Name 	if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
4543*5113495bSYour Name 		if (soc->arch_ops.dp_tx_mcast_handler)
4544*5113495bSYour Name 			soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
4545*5113495bSYour Name 
4546*5113495bSYour Name 		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4547*5113495bSYour Name 		return true;
4548*5113495bSYour Name 	}
4549*5113495bSYour Name 
4550*5113495bSYour Name 	return false;
4551*5113495bSYour Name }
4552*5113495bSYour Name #else /* WLAN_MCAST_MLO */
4553*5113495bSYour Name static inline bool
4554*5113495bSYour Name dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4555*5113495bSYour Name 		       struct dp_tx_desc_s *tx_desc,
4556*5113495bSYour Name 		       qdf_nbuf_t nbuf,
4557*5113495bSYour Name 		       uint8_t reinject_reason)
4558*5113495bSYour Name {
4559*5113495bSYour Name 	return false;
4560*5113495bSYour Name }
4561*5113495bSYour Name #endif /* WLAN_MCAST_MLO */
4562*5113495bSYour Name #else
4563*5113495bSYour Name static inline bool
4564*5113495bSYour Name dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
4565*5113495bSYour Name 		       struct dp_tx_desc_s *tx_desc,
4566*5113495bSYour Name 		       qdf_nbuf_t nbuf,
4567*5113495bSYour Name 		       uint8_t reinject_reason)
4568*5113495bSYour Name {
4569*5113495bSYour Name 	return false;
4570*5113495bSYour Name }
4571*5113495bSYour Name #endif
4572*5113495bSYour Name 
4573*5113495bSYour Name void dp_tx_reinject_handler(struct dp_soc *soc,
4574*5113495bSYour Name 			    struct dp_vdev *vdev,
4575*5113495bSYour Name 			    struct dp_tx_desc_s *tx_desc,
4576*5113495bSYour Name 			    uint8_t *status,
4577*5113495bSYour Name 			    uint8_t reinject_reason)
4578*5113495bSYour Name {
4579*5113495bSYour Name 	struct dp_peer *peer = NULL;
4580*5113495bSYour Name 	uint32_t peer_id = HTT_INVALID_PEER;
4581*5113495bSYour Name 	qdf_nbuf_t nbuf = tx_desc->nbuf;
4582*5113495bSYour Name 	qdf_nbuf_t nbuf_copy = NULL;
4583*5113495bSYour Name 	struct dp_tx_msdu_info_s msdu_info;
4584*5113495bSYour Name #ifdef WDS_VENDOR_EXTENSION
4585*5113495bSYour Name 	int is_mcast = 0, is_ucast = 0;
4586*5113495bSYour Name 	int num_peers_3addr = 0;
4587*5113495bSYour Name 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
4588*5113495bSYour Name 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
4589*5113495bSYour Name #endif
4590*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
4591*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
4592*5113495bSYour Name 
4593*5113495bSYour Name 	qdf_assert(vdev);
4594*5113495bSYour Name 
4595*5113495bSYour Name 	dp_tx_debug("Tx reinject path");
4596*5113495bSYour Name 
4597*5113495bSYour Name 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].reinject_pkts, 1,
4598*5113495bSYour Name 			 qdf_nbuf_len(tx_desc->nbuf));
4599*5113495bSYour Name 
4600*5113495bSYour Name 	if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
4601*5113495bSYour Name 		return;
4602*5113495bSYour Name 
4603*5113495bSYour Name #ifdef WDS_VENDOR_EXTENSION
4604*5113495bSYour Name 	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
4605*5113495bSYour Name 		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
4606*5113495bSYour Name 	} else {
4607*5113495bSYour Name 		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
4608*5113495bSYour Name 	}
4609*5113495bSYour Name 	is_ucast = !is_mcast;
4610*5113495bSYour Name 
4611*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->peer_list_lock);
4612*5113495bSYour Name 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4613*5113495bSYour Name 		txrx_peer = dp_get_txrx_peer(peer);
4614*5113495bSYour Name 
4615*5113495bSYour Name 		if (!txrx_peer || txrx_peer->bss_peer)
4616*5113495bSYour Name 			continue;
4617*5113495bSYour Name 
4618*5113495bSYour Name 		/* Detect WDS peers that use 3-addr framing for mcast.
4619*5113495bSYour Name 		 * If there are any, the bss_peer is used to send the
4620*5113495bSYour Name 		 * mcast frame using 3-addr format. All WDS-enabled
4621*5113495bSYour Name 		 * peers that use 4-addr framing for mcast frames will
4622*5113495bSYour Name 		 * be duplicated and sent as 4-addr frames below.
4623*5113495bSYour Name 		 */
4624*5113495bSYour Name 		if (!txrx_peer->wds_enabled ||
4625*5113495bSYour Name 		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
4626*5113495bSYour Name 			num_peers_3addr = 1;
4627*5113495bSYour Name 			break;
4628*5113495bSYour Name 		}
4629*5113495bSYour Name 	}
4630*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
4631*5113495bSYour Name #endif
4632*5113495bSYour Name 
4633*5113495bSYour Name 	if (qdf_unlikely(vdev->mesh_vdev)) {
4634*5113495bSYour Name 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
4635*5113495bSYour Name 	} else {
4636*5113495bSYour Name 		qdf_spin_lock_bh(&vdev->peer_list_lock);
4637*5113495bSYour Name 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4638*5113495bSYour Name 			txrx_peer = dp_get_txrx_peer(peer);
4639*5113495bSYour Name 			if (!txrx_peer)
4640*5113495bSYour Name 				continue;
4641*5113495bSYour Name 
4642*5113495bSYour Name 			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
4643*5113495bSYour Name #ifdef WDS_VENDOR_EXTENSION
4644*5113495bSYour Name 			/*
4645*5113495bSYour Name 			 * - if 3-addr STA, then send on BSS peer
4646*5113495bSYour Name 			 * - if peer is WDS-enabled and accepts 4-addr mcast,
4647*5113495bSYour Name 			 *   send mcast on that peer only
4648*5113495bSYour Name 			 * - if peer is WDS-enabled and accepts 4-addr ucast,
4649*5113495bSYour Name 			 *   send ucast on that peer only
4650*5113495bSYour Name 			 */
4651*5113495bSYour Name 			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
4652*5113495bSYour Name 			 (txrx_peer->wds_enabled &&
4653*5113495bSYour Name 			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
4654*5113495bSYour Name 			 (is_ucast &&
4655*5113495bSYour Name 			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
4656*5113495bSYour Name #else
4657*5113495bSYour Name 			(txrx_peer->bss_peer &&
4658*5113495bSYour Name 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
4659*5113495bSYour Name #endif
4660*5113495bSYour Name 				peer_id = DP_INVALID_PEER;
4661*5113495bSYour Name 
4662*5113495bSYour Name 				nbuf_copy = qdf_nbuf_copy(nbuf);
4663*5113495bSYour Name 
4664*5113495bSYour Name 				if (!nbuf_copy) {
4665*5113495bSYour Name 					dp_tx_debug("nbuf copy failed");
4666*5113495bSYour Name 					break;
4667*5113495bSYour Name 				}
4668*5113495bSYour Name 				qdf_mem_zero(&msdu_info, sizeof(msdu_info));
4669*5113495bSYour Name 				dp_tx_get_queue(vdev, nbuf,
4670*5113495bSYour Name 						&msdu_info.tx_queue);
4671*5113495bSYour Name 				msdu_info.xmit_type =
4672*5113495bSYour Name 					qdf_nbuf_get_vdev_xmit_type(nbuf);
4673*5113495bSYour Name 				nbuf_copy = dp_tx_send_msdu_single(vdev,
4674*5113495bSYour Name 						nbuf_copy,
4675*5113495bSYour Name 						&msdu_info,
4676*5113495bSYour Name 						peer_id,
4677*5113495bSYour Name 						NULL);
4678*5113495bSYour Name 
4679*5113495bSYour Name 				if (nbuf_copy) {
4680*5113495bSYour Name 					dp_tx_debug("pkt send failed");
4681*5113495bSYour Name 					qdf_nbuf_free(nbuf_copy);
4682*5113495bSYour Name 				}
4683*5113495bSYour Name 			}
4684*5113495bSYour Name 		}
4685*5113495bSYour Name 		qdf_spin_unlock_bh(&vdev->peer_list_lock);
4686*5113495bSYour Name 
4687*5113495bSYour Name 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
4688*5113495bSYour Name 					     QDF_DMA_TO_DEVICE, nbuf->len);
4689*5113495bSYour Name 		qdf_nbuf_free(nbuf);
4690*5113495bSYour Name 	}
4691*5113495bSYour Name 
4692*5113495bSYour Name 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4693*5113495bSYour Name }
4694*5113495bSYour Name 
4695*5113495bSYour Name void dp_tx_inspect_handler(struct dp_soc *soc,
4696*5113495bSYour Name 			   struct dp_vdev *vdev,
4697*5113495bSYour Name 			   struct dp_tx_desc_s *tx_desc,
4698*5113495bSYour Name 			   uint8_t *status)
4699*5113495bSYour Name {
4700*5113495bSYour Name 	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
4701*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4702*5113495bSYour Name 			"%s Tx inspect path",
4703*5113495bSYour Name 			__func__);
4704*5113495bSYour Name 
4705*5113495bSYour Name 	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].inspect_pkts, 1,
4706*5113495bSYour Name 			 qdf_nbuf_len(tx_desc->nbuf));
4707*5113495bSYour Name 
4708*5113495bSYour Name 	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
4709*5113495bSYour Name 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
4710*5113495bSYour Name }
4711*5113495bSYour Name 
4712*5113495bSYour Name #ifdef MESH_MODE_SUPPORT
4713*5113495bSYour Name /**
4714*5113495bSYour Name  * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
4715*5113495bSYour Name  *                                         in mesh meta header
4716*5113495bSYour Name  * @tx_desc: software descriptor head pointer
4717*5113495bSYour Name  * @ts: pointer to tx completion stats
4718*5113495bSYour Name  * Return: none
4719*5113495bSYour Name  */
4720*5113495bSYour Name static
4721*5113495bSYour Name void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4722*5113495bSYour Name 		struct hal_tx_completion_status *ts)
4723*5113495bSYour Name {
4724*5113495bSYour Name 	qdf_nbuf_t netbuf = tx_desc->nbuf;
4725*5113495bSYour Name 
4726*5113495bSYour Name 	if (!tx_desc->msdu_ext_desc) {
4727*5113495bSYour Name 		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
4728*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4729*5113495bSYour Name 				"netbuf %pK offset %d",
4730*5113495bSYour Name 				netbuf, tx_desc->pkt_offset);
4731*5113495bSYour Name 			return;
4732*5113495bSYour Name 		}
4733*5113495bSYour Name 	}
4734*5113495bSYour Name }
4735*5113495bSYour Name 
4736*5113495bSYour Name #else
4737*5113495bSYour Name static
4738*5113495bSYour Name void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
4739*5113495bSYour Name 		struct hal_tx_completion_status *ts)
4740*5113495bSYour Name {
4741*5113495bSYour Name }
4742*5113495bSYour Name 
4743*5113495bSYour Name #endif
4744*5113495bSYour Name 
4745*5113495bSYour Name #ifdef CONFIG_SAWF
4746*5113495bSYour Name static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4747*5113495bSYour Name 					 struct dp_vdev *vdev,
4748*5113495bSYour Name 					 struct dp_txrx_peer *txrx_peer,
4749*5113495bSYour Name 					 struct dp_tx_desc_s *tx_desc,
4750*5113495bSYour Name 					 struct hal_tx_completion_status *ts,
4751*5113495bSYour Name 					 uint8_t tid)
4752*5113495bSYour Name {
4753*5113495bSYour Name 	dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4754*5113495bSYour Name 					   ts, tid);
4755*5113495bSYour Name }
4756*5113495bSYour Name 
4757*5113495bSYour Name static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats  *tx_delay,
4758*5113495bSYour Name 				    uint32_t nw_delay,
4759*5113495bSYour Name 				    uint32_t sw_delay,
4760*5113495bSYour Name 				    uint32_t hw_delay)
4761*5113495bSYour Name {
4762*5113495bSYour Name 	dp_peer_tid_delay_avg(tx_delay,
4763*5113495bSYour Name 			      nw_delay,
4764*5113495bSYour Name 			      sw_delay,
4765*5113495bSYour Name 			      hw_delay);
4766*5113495bSYour Name }
4767*5113495bSYour Name #else
4768*5113495bSYour Name static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
4769*5113495bSYour Name 					 struct dp_vdev *vdev,
4770*5113495bSYour Name 					 struct dp_txrx_peer *txrx_peer,
4771*5113495bSYour Name 					 struct dp_tx_desc_s *tx_desc,
4772*5113495bSYour Name 					 struct hal_tx_completion_status *ts,
4773*5113495bSYour Name 					 uint8_t tid)
4774*5113495bSYour Name {
4775*5113495bSYour Name }
4776*5113495bSYour Name 
4777*5113495bSYour Name static inline void
4778*5113495bSYour Name dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
4779*5113495bSYour Name 			uint32_t nw_delay, uint32_t sw_delay,
4780*5113495bSYour Name 			uint32_t hw_delay)
4781*5113495bSYour Name {
4782*5113495bSYour Name }
4783*5113495bSYour Name #endif
4784*5113495bSYour Name 
4785*5113495bSYour Name #ifdef QCA_PEER_EXT_STATS
4786*5113495bSYour Name #ifdef WLAN_CONFIG_TX_DELAY
4787*5113495bSYour Name static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4788*5113495bSYour Name 				    struct dp_tx_desc_s *tx_desc,
4789*5113495bSYour Name 				    struct hal_tx_completion_status *ts,
4790*5113495bSYour Name 				    struct dp_vdev *vdev)
4791*5113495bSYour Name {
4792*5113495bSYour Name 	struct dp_soc *soc = vdev->pdev->soc;
4793*5113495bSYour Name 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4794*5113495bSYour Name 	int64_t timestamp_ingress, timestamp_hw_enqueue;
4795*5113495bSYour Name 	uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
4796*5113495bSYour Name 
4797*5113495bSYour Name 	if (!ts->valid)
4798*5113495bSYour Name 		return;
4799*5113495bSYour Name 
4800*5113495bSYour Name 	timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
4801*5113495bSYour Name 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4802*5113495bSYour Name 
4803*5113495bSYour Name 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4804*5113495bSYour Name 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4805*5113495bSYour Name 
4806*5113495bSYour Name 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4807*5113495bSYour Name 		if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4808*5113495bSYour Name 							  &fwhw_transmit_delay))
4809*5113495bSYour Name 			dp_hist_update_stats(&tx_delay->hwtx_delay,
4810*5113495bSYour Name 					     fwhw_transmit_delay);
4811*5113495bSYour Name 
4812*5113495bSYour Name 	dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
4813*5113495bSYour Name 				fwhw_transmit_delay);
4814*5113495bSYour Name }
4815*5113495bSYour Name #else
4816*5113495bSYour Name /**
4817*5113495bSYour Name  * dp_tx_compute_tid_delay() - Compute per TID delay
4818*5113495bSYour Name  * @stats: Per TID delay stats
4819*5113495bSYour Name  * @tx_desc: Software Tx descriptor
4820*5113495bSYour Name  * @ts: Tx completion status
4821*5113495bSYour Name  * @vdev: vdev
4822*5113495bSYour Name  *
4823*5113495bSYour Name  * Compute the software enqueue and hw enqueue delays and
4824*5113495bSYour Name  * update the respective histograms
4825*5113495bSYour Name  *
4826*5113495bSYour Name  * Return: void
4827*5113495bSYour Name  */
4828*5113495bSYour Name static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
4829*5113495bSYour Name 				    struct dp_tx_desc_s *tx_desc,
4830*5113495bSYour Name 				    struct hal_tx_completion_status *ts,
4831*5113495bSYour Name 				    struct dp_vdev *vdev)
4832*5113495bSYour Name {
4833*5113495bSYour Name 	struct cdp_delay_tx_stats  *tx_delay = &stats->tx_delay;
4834*5113495bSYour Name 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
4835*5113495bSYour Name 	uint32_t sw_enqueue_delay, fwhw_transmit_delay;
4836*5113495bSYour Name 
4837*5113495bSYour Name 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
4838*5113495bSYour Name 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
4839*5113495bSYour Name 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
4840*5113495bSYour Name 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
4841*5113495bSYour Name 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
4842*5113495bSYour Name 					 timestamp_hw_enqueue);
4843*5113495bSYour Name 
4844*5113495bSYour Name 	/*
4845*5113495bSYour Name 	 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
4846*5113495bSYour Name 	 */
4847*5113495bSYour Name 	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
4848*5113495bSYour Name 	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
4849*5113495bSYour Name }
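
/*
 * Worked example for the ms-resolution path above: with
 * timestamp_ingress = 1000, timestamp_hw_enqueue = 1002 and
 * current_timestamp = 1007 (all ms), sw_enqueue_delay is 2 ms of host
 * queuing and fwhw_transmit_delay is 5 ms of FW/HW queuing plus air
 * time; each value feeds its own histogram via dp_hist_update_stats().
 */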
4850*5113495bSYour Name #endif
4851*5113495bSYour Name 
4852*5113495bSYour Name /**
4853*5113495bSYour Name  * dp_tx_update_peer_delay_stats() - Update the peer delay stats
4854*5113495bSYour Name  * @txrx_peer: DP peer context
4855*5113495bSYour Name  * @tx_desc: Tx software descriptor
4856*5113495bSYour Name  * @ts: Tx completion status
4857*5113495bSYour Name  * @ring_id: Tx completion ring number (CPU context ID)
4858*5113495bSYour Name  *
4859*5113495bSYour Name  * Update the peer extended stats. These are enhanced per-MSDU
4860*5113495bSYour Name  * delay statistics.
4861*5113495bSYour Name  *
4862*5113495bSYour Name  * Return: void
4863*5113495bSYour Name  */
4864*5113495bSYour Name static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4865*5113495bSYour Name 					  struct dp_tx_desc_s *tx_desc,
4866*5113495bSYour Name 					  struct hal_tx_completion_status *ts,
4867*5113495bSYour Name 					  uint8_t ring_id)
4868*5113495bSYour Name {
4869*5113495bSYour Name 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4870*5113495bSYour Name 	struct dp_soc *soc = NULL;
4871*5113495bSYour Name 	struct dp_peer_delay_stats *delay_stats = NULL;
4872*5113495bSYour Name 	uint8_t tid;
4873*5113495bSYour Name 
4874*5113495bSYour Name 	soc = pdev->soc;
4875*5113495bSYour Name 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
4876*5113495bSYour Name 		return;
4877*5113495bSYour Name 
4878*5113495bSYour Name 	if (!txrx_peer->delay_stats)
4879*5113495bSYour Name 		return;
4880*5113495bSYour Name 
4881*5113495bSYour Name 	tid = ts->tid;
4882*5113495bSYour Name 	delay_stats = txrx_peer->delay_stats;
4883*5113495bSYour Name 
4884*5113495bSYour Name 	/*
4885*5113495bSYour Name 	 * For non-data TIDs, use the last data TID (CDP_MAX_DATA_TIDS - 1)
4886*5113495bSYour Name 	 */
4887*5113495bSYour Name 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
4888*5113495bSYour Name 		tid = CDP_MAX_DATA_TIDS - 1;
4889*5113495bSYour Name 
4890*5113495bSYour Name 	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
4891*5113495bSYour Name 				tx_desc, ts, txrx_peer->vdev);
4892*5113495bSYour Name }
4893*5113495bSYour Name #else
4894*5113495bSYour Name static inline
4895*5113495bSYour Name void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
4896*5113495bSYour Name 				   struct dp_tx_desc_s *tx_desc,
4897*5113495bSYour Name 				   struct hal_tx_completion_status *ts,
4898*5113495bSYour Name 				   uint8_t ring_id)
4899*5113495bSYour Name {
4900*5113495bSYour Name }
4901*5113495bSYour Name #endif
4902*5113495bSYour Name 
4903*5113495bSYour Name #ifdef WLAN_PEER_JITTER
4904*5113495bSYour Name /**
4905*5113495bSYour Name  * dp_tx_jitter_get_avg_jitter() - compute the average jitter
4906*5113495bSYour Name  * @curr_delay: Current delay
4907*5113495bSYour Name  * @prev_delay: Previous delay
4908*5113495bSYour Name  * @avg_jitter: Average Jitter
4909*5113495bSYour Name  * Return: Newly Computed Average Jitter
4910*5113495bSYour Name  */
4911*5113495bSYour Name static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
4912*5113495bSYour Name 					    uint32_t prev_delay,
4913*5113495bSYour Name 					    uint32_t avg_jitter)
4914*5113495bSYour Name {
4915*5113495bSYour Name 	uint32_t curr_jitter;
4916*5113495bSYour Name 	int32_t jitter_diff;
4917*5113495bSYour Name 
4918*5113495bSYour Name 	curr_jitter = qdf_abs(curr_delay - prev_delay);
4919*5113495bSYour Name 	if (!avg_jitter)
4920*5113495bSYour Name 		return curr_jitter;
4921*5113495bSYour Name 
4922*5113495bSYour Name 	jitter_diff = curr_jitter - avg_jitter;
4923*5113495bSYour Name 	if (jitter_diff < 0)
4924*5113495bSYour Name 		avg_jitter = avg_jitter -
4925*5113495bSYour Name 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4926*5113495bSYour Name 	else
4927*5113495bSYour Name 		avg_jitter = avg_jitter +
4928*5113495bSYour Name 			(qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
4929*5113495bSYour Name 
4930*5113495bSYour Name 	return avg_jitter;
4931*5113495bSYour Name }
4932*5113495bSYour Name 
4933*5113495bSYour Name /**
4934*5113495bSYour Name  * dp_tx_jitter_get_avg_delay() - compute the average delay
4935*5113495bSYour Name  * @curr_delay: Current delay
4936*5113495bSYour Name  * @avg_delay: Average delay
4937*5113495bSYour Name  * Return: Newly Computed Average Delay
4938*5113495bSYour Name  */
4939*5113495bSYour Name static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
4940*5113495bSYour Name 					   uint32_t avg_delay)
4941*5113495bSYour Name {
4942*5113495bSYour Name 	int32_t delay_diff;
4943*5113495bSYour Name 
4944*5113495bSYour Name 	if (!avg_delay)
4945*5113495bSYour Name 		return curr_delay;
4946*5113495bSYour Name 
4947*5113495bSYour Name 	delay_diff = curr_delay - avg_delay;
4948*5113495bSYour Name 	if (delay_diff < 0)
4949*5113495bSYour Name 		avg_delay = avg_delay - (qdf_abs(delay_diff) >>
4950*5113495bSYour Name 					DP_AVG_DELAY_WEIGHT_DENOM);
4951*5113495bSYour Name 	else
4952*5113495bSYour Name 		avg_delay = avg_delay + (qdf_abs(delay_diff) >>
4953*5113495bSYour Name 					DP_AVG_DELAY_WEIGHT_DENOM);
4954*5113495bSYour Name 
4955*5113495bSYour Name 	return avg_delay;
4956*5113495bSYour Name }
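
/*
 * Both helpers above implement the same exponentially weighted moving
 * average, using shifts in place of division:
 *
 *     avg_new = avg + (curr - avg) / 2^W
 *
 * with W = DP_AVG_JITTER_WEIGHT_DENOM or DP_AVG_DELAY_WEIGHT_DENOM.
 * Worked example with W = 2, avg = 100 us, curr = 140 us:
 *
 *     delay_diff = 40;
 *     avg_new    = 100 + (40 >> 2) = 110 us
 */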
4957*5113495bSYour Name 
4958*5113495bSYour Name #ifdef WLAN_CONFIG_TX_DELAY
4959*5113495bSYour Name /**
4960*5113495bSYour Name  * dp_tx_compute_cur_delay() - get the current delay
4961*5113495bSYour Name  * @soc: soc handle
4962*5113495bSYour Name  * @vdev: vdev structure for data path state
4963*5113495bSYour Name  * @ts: Tx completion status
4964*5113495bSYour Name  * @curr_delay: current delay
4965*5113495bSYour Name  * @tx_desc: tx descriptor
4966*5113495bSYour Name  * Return: QDF_STATUS
4967*5113495bSYour Name  */
4968*5113495bSYour Name static
4969*5113495bSYour Name QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4970*5113495bSYour Name 				   struct dp_vdev *vdev,
4971*5113495bSYour Name 				   struct hal_tx_completion_status *ts,
4972*5113495bSYour Name 				   uint32_t *curr_delay,
4973*5113495bSYour Name 				   struct dp_tx_desc_s *tx_desc)
4974*5113495bSYour Name {
4975*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
4976*5113495bSYour Name 
4977*5113495bSYour Name 	if (soc->arch_ops.dp_tx_compute_hw_delay)
4978*5113495bSYour Name 		status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
4979*5113495bSYour Name 							      curr_delay);
4980*5113495bSYour Name 	return status;
4981*5113495bSYour Name }
4982*5113495bSYour Name #else
4983*5113495bSYour Name static
4984*5113495bSYour Name QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
4985*5113495bSYour Name 				   struct dp_vdev *vdev,
4986*5113495bSYour Name 				   struct hal_tx_completion_status *ts,
4987*5113495bSYour Name 				   uint32_t *curr_delay,
4988*5113495bSYour Name 				   struct dp_tx_desc_s *tx_desc)
4989*5113495bSYour Name {
4990*5113495bSYour Name 	int64_t current_timestamp, timestamp_hw_enqueue;
4991*5113495bSYour Name 
4992*5113495bSYour Name 	current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
4993*5113495bSYour Name 	timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
4994*5113495bSYour Name 	*curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
4995*5113495bSYour Name 
4996*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
4997*5113495bSYour Name }
4998*5113495bSYour Name #endif
4999*5113495bSYour Name 
5000*5113495bSYour Name /**
5001*5113495bSYour Name  * dp_tx_compute_tid_jitter() - compute per tid per ring jitter
5002*5113495bSYour Name  * @jitter: per tid per ring jitter stats
5003*5113495bSYour Name  * @ts: Tx completion status
5004*5113495bSYour Name  * @vdev: vdev structure for data path state
5005*5113495bSYour Name  * @tx_desc: tx descriptor
5006*5113495bSYour Name  * Return: void
5007*5113495bSYour Name  */
5008*5113495bSYour Name static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
5009*5113495bSYour Name 				     struct hal_tx_completion_status *ts,
5010*5113495bSYour Name 				     struct dp_vdev *vdev,
5011*5113495bSYour Name 				     struct dp_tx_desc_s *tx_desc)
5012*5113495bSYour Name {
5013*5113495bSYour Name 	uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
5014*5113495bSYour Name 	struct dp_soc *soc = vdev->pdev->soc;
5015*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
5016*5113495bSYour Name 
5017*5113495bSYour Name 	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
5018*5113495bSYour Name 		jitter->tx_drop += 1;
5019*5113495bSYour Name 		return;
5020*5113495bSYour Name 	}
5021*5113495bSYour Name 
5022*5113495bSYour Name 	status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
5023*5113495bSYour Name 					 tx_desc);
5024*5113495bSYour Name 
5025*5113495bSYour Name 	if (QDF_IS_STATUS_SUCCESS(status)) {
5026*5113495bSYour Name 		avg_delay = jitter->tx_avg_delay;
5027*5113495bSYour Name 		avg_jitter = jitter->tx_avg_jitter;
5028*5113495bSYour Name 		prev_delay = jitter->tx_prev_delay;
5029*5113495bSYour Name 		avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
5030*5113495bSYour Name 							 prev_delay,
5031*5113495bSYour Name 							 avg_jitter);
5032*5113495bSYour Name 		avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
5033*5113495bSYour Name 		jitter->tx_avg_delay = avg_delay;
5034*5113495bSYour Name 		jitter->tx_avg_jitter = avg_jitter;
5035*5113495bSYour Name 		jitter->tx_prev_delay = curr_delay;
5036*5113495bSYour Name 		jitter->tx_total_success += 1;
5037*5113495bSYour Name 	} else if (status == QDF_STATUS_E_FAILURE) {
5038*5113495bSYour Name 		jitter->tx_avg_err += 1;
5039*5113495bSYour Name 	}
5040*5113495bSYour Name }
5041*5113495bSYour Name 
5042*5113495bSYour Name /**
5042*5113495bSYour Name  * dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
5043*5113495bSYour Name  * @txrx_peer: DP peer context
5044*5113495bSYour Name  * @tx_desc: Tx software descriptor
5045*5113495bSYour Name  * @ts: Tx completion status
5046*5113495bSYour Name  * @ring_id: Tx completion ring number (CPU context ID)
5047*5113495bSYour Name  * Return: void
5048*5113495bSYour Name  */
5049*5113495bSYour Name static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
5050*5113495bSYour Name 					   struct dp_tx_desc_s *tx_desc,
5051*5113495bSYour Name 					   struct hal_tx_completion_status *ts,
5052*5113495bSYour Name 					   uint8_t ring_id)
5053*5113495bSYour Name {
5054*5113495bSYour Name 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
5055*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
5056*5113495bSYour Name 	struct cdp_peer_tid_stats *jitter_stats = NULL;
5057*5113495bSYour Name 	uint8_t tid;
5058*5113495bSYour Name 	struct cdp_peer_tid_stats *rx_tid = NULL;
5059*5113495bSYour Name 
5060*5113495bSYour Name 	if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
5061*5113495bSYour Name 		return;
5062*5113495bSYour Name 
5063*5113495bSYour Name 	if (!txrx_peer->jitter_stats)
5064*5113495bSYour Name 		return;
5065*5113495bSYour Name 
5066*5113495bSYour Name 	tid = ts->tid;
5067*5113495bSYour Name 	jitter_stats = txrx_peer->jitter_stats;
5068*5113495bSYour Name 
5069*5113495bSYour Name 	/*
5070*5113495bSYour Name 	 * For non-data TIDs, use the last data TID (CDP_MAX_DATA_TIDS - 1)
5071*5113495bSYour Name 	 */
5072*5113495bSYour Name 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
5073*5113495bSYour Name 		tid = CDP_MAX_DATA_TIDS - 1;
5074*5113495bSYour Name 
5075*5113495bSYour Name 	rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
5076*5113495bSYour Name 	dp_tx_compute_tid_jitter(rx_tid,
5077*5113495bSYour Name 				 ts, txrx_peer->vdev, tx_desc);
5078*5113495bSYour Name }
5079*5113495bSYour Name #else
5080*5113495bSYour Name static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
5081*5113495bSYour Name 					   struct dp_tx_desc_s *tx_desc,
5082*5113495bSYour Name 					   struct hal_tx_completion_status *ts,
5083*5113495bSYour Name 					   uint8_t ring_id)
5084*5113495bSYour Name {
5085*5113495bSYour Name }
5086*5113495bSYour Name #endif
5087*5113495bSYour Name 
5088*5113495bSYour Name #ifdef HW_TX_DELAY_STATS_ENABLE
5089*5113495bSYour Name /**
5090*5113495bSYour Name  * dp_update_tx_delay_stats() - update the delay stats
5091*5113495bSYour Name  * @vdev: vdev handle
5092*5113495bSYour Name  * @delay: delay in ms or us based on the flag delay_in_us
5093*5113495bSYour Name  * @tid: tid value
5094*5113495bSYour Name  * @mode: type of tx delay mode
5095*5113495bSYour Name  * @ring_id: ring number
5096*5113495bSYour Name  * @delay_in_us: flag to indicate whether the delay is in ms or us
5097*5113495bSYour Name  *
5098*5113495bSYour Name  * Return: none
5099*5113495bSYour Name  */
5100*5113495bSYour Name static inline
5101*5113495bSYour Name void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
5102*5113495bSYour Name 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
5103*5113495bSYour Name {
5104*5113495bSYour Name 	struct cdp_tid_tx_stats *tstats =
5105*5113495bSYour Name 		&vdev->stats.tid_tx_stats[ring_id][tid];
5106*5113495bSYour Name 
5107*5113495bSYour Name 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
5108*5113495bSYour Name 			      delay_in_us);
5109*5113495bSYour Name }
5110*5113495bSYour Name #else
5111*5113495bSYour Name static inline
5112*5113495bSYour Name void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
5113*5113495bSYour Name 			      uint8_t mode, uint8_t ring_id, bool delay_in_us)
5114*5113495bSYour Name {
5115*5113495bSYour Name 	struct cdp_tid_tx_stats *tstats =
5116*5113495bSYour Name 		&vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
5117*5113495bSYour Name 
5118*5113495bSYour Name 	dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
5119*5113495bSYour Name 			      delay_in_us);
5120*5113495bSYour Name }
5121*5113495bSYour Name #endif
5122*5113495bSYour Name 
5123*5113495bSYour Name void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
5124*5113495bSYour Name 			 uint8_t tid, uint8_t ring_id)
5125*5113495bSYour Name {
5126*5113495bSYour Name 	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
5127*5113495bSYour Name 	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
5128*5113495bSYour Name 	uint32_t fwhw_transmit_delay_us;
5129*5113495bSYour Name 
5130*5113495bSYour Name 	if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
5131*5113495bSYour Name 	    qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
5132*5113495bSYour Name 		return;
5133*5113495bSYour Name 
5134*5113495bSYour Name 	if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
5135*5113495bSYour Name 		fwhw_transmit_delay_us =
5136*5113495bSYour Name 			qdf_ktime_to_us(qdf_ktime_real_get()) -
5137*5113495bSYour Name 			qdf_ktime_to_us(tx_desc->timestamp);
5138*5113495bSYour Name 
5139*5113495bSYour Name 		/*
5140*5113495bSYour Name 		 * Delay between packet enqueued to HW and Tx completion in us
5141*5113495bSYour Name 		 */
5142*5113495bSYour Name 		dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
5143*5113495bSYour Name 					 CDP_DELAY_STATS_FW_HW_TRANSMIT,
5144*5113495bSYour Name 					 ring_id, true);
5145*5113495bSYour Name 		/*
5146*5113495bSYour Name 		 * For MCL, only the enqueue-to-completion delay is required,
5147*5113495bSYour Name 		 * so return here when the per-vdev flag is enabled.
5148*5113495bSYour Name 		 */
5149*5113495bSYour Name 		return;
5150*5113495bSYour Name 	}
5151*5113495bSYour Name 
5152*5113495bSYour Name 	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
5153*5113495bSYour Name 	timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
5154*5113495bSYour Name 	fwhw_transmit_delay = (uint32_t)(current_timestamp -
5155*5113495bSYour Name 					 timestamp_hw_enqueue);
5156*5113495bSYour Name 
5157*5113495bSYour Name 	if (!timestamp_hw_enqueue)
5158*5113495bSYour Name 		return;
5159*5113495bSYour Name 	/*
5160*5113495bSYour Name 	 * Delay between packet enqueued to HW and Tx completion in ms
5161*5113495bSYour Name 	 */
5162*5113495bSYour Name 	dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
5163*5113495bSYour Name 				 CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
5164*5113495bSYour Name 				 false);
5165*5113495bSYour Name 
5166*5113495bSYour Name 	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
5167*5113495bSYour Name 	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
5168*5113495bSYour Name 	interframe_delay = (uint32_t)(timestamp_ingress -
5169*5113495bSYour Name 				      vdev->prev_tx_enq_tstamp);
5170*5113495bSYour Name 
5171*5113495bSYour Name 	/*
5172*5113495bSYour Name 	 * Delay in software enqueue
5173*5113495bSYour Name 	 */
5174*5113495bSYour Name 	dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
5175*5113495bSYour Name 				 CDP_DELAY_STATS_SW_ENQ, ring_id,
5176*5113495bSYour Name 				 false);
5177*5113495bSYour Name 
5178*5113495bSYour Name 	/*
5179*5113495bSYour Name 	 * Update interframe delay stats calculated at hardstart receive point.
5180*5113495bSYour Name 	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
5181*5113495bSYour Name 	 * interframe delay will not be calculate correctly for 1st frame.
5182*5113495bSYour Name 	 * interframe delay will not be calculated correctly for the 1st
5183*5113495bSYour Name 	 * frame. On the other hand, this avoids an extra per-packet check
5184*5113495bSYour Name 	 */
5185*5113495bSYour Name 	dp_update_tx_delay_stats(vdev, interframe_delay, tid,
5186*5113495bSYour Name 				 CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
5187*5113495bSYour Name 				 false);
5188*5113495bSYour Name 	vdev->prev_tx_enq_tstamp = timestamp_ingress;
5189*5113495bSYour Name }
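
/*
 * Interframe example: if frame N was received from the stack at
 * timestamp_ingress = 5000 ms and frame N+1 at 5004 ms, the N+1
 * completion records interframe_delay = 4 ms. For the very first frame
 * prev_tx_enq_tstamp is 0, so that one sample is knowingly bogus, as
 * noted in the comment above.
 */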
5190*5113495bSYour Name 
5191*5113495bSYour Name #ifdef DISABLE_DP_STATS
5192*5113495bSYour Name static
5193*5113495bSYour Name inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
5194*5113495bSYour Name 				   struct dp_txrx_peer *txrx_peer,
5195*5113495bSYour Name 				   uint8_t link_id)
5196*5113495bSYour Name {
5197*5113495bSYour Name }
5198*5113495bSYour Name #else
5199*5113495bSYour Name static inline void
5200*5113495bSYour Name dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
5201*5113495bSYour Name 		       uint8_t link_id)
5202*5113495bSYour Name {
5203*5113495bSYour Name 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
5204*5113495bSYour Name 
5205*5113495bSYour Name 	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
5206*5113495bSYour Name 	if (subtype != QDF_PROTO_INVALID)
5207*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
5208*5113495bSYour Name 					  1, link_id);
5209*5113495bSYour Name }
5210*5113495bSYour Name #endif
5211*5113495bSYour Name 
5212*5113495bSYour Name #ifndef QCA_ENHANCED_STATS_SUPPORT
5213*5113495bSYour Name #ifdef DP_PEER_EXTENDED_API
5214*5113495bSYour Name static inline uint8_t
5215*5113495bSYour Name dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
5216*5113495bSYour Name {
5217*5113495bSYour Name 	return txrx_peer->mpdu_retry_threshold;
5218*5113495bSYour Name }
5219*5113495bSYour Name #else
5220*5113495bSYour Name static inline uint8_t
5221*5113495bSYour Name dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
5222*5113495bSYour Name {
5223*5113495bSYour Name 	return 0;
5224*5113495bSYour Name }
5225*5113495bSYour Name #endif
5226*5113495bSYour Name 
5227*5113495bSYour Name /**
5228*5113495bSYour Name  * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
5229*5113495bSYour Name  *
5230*5113495bSYour Name  * @ts: Tx completion status
5231*5113495bSYour Name  * @txrx_peer: datapath txrx_peer handle
5232*5113495bSYour Name  * @link_id: Link id
5233*5113495bSYour Name  *
5234*5113495bSYour Name  * Return: void
5235*5113495bSYour Name  */
5236*5113495bSYour Name static inline void
5237*5113495bSYour Name dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
5238*5113495bSYour Name 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
5239*5113495bSYour Name {
5240*5113495bSYour Name 	uint8_t mcs, pkt_type, dst_mcs_idx;
5241*5113495bSYour Name 	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
5242*5113495bSYour Name 
5243*5113495bSYour Name 	mcs = ts->mcs;
5244*5113495bSYour Name 	pkt_type = ts->pkt_type;
5245*5113495bSYour Name 	/* do HW to SW pkt type conversion */
5246*5113495bSYour Name 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
5247*5113495bSYour Name 		    hal_2_dp_pkt_type_map[pkt_type]);
5248*5113495bSYour Name 
5249*5113495bSYour Name 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
5250*5113495bSYour Name 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
5251*5113495bSYour Name 		DP_PEER_EXTD_STATS_INC(txrx_peer,
5252*5113495bSYour Name 				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
5253*5113495bSYour Name 				       1, link_id);
5254*5113495bSYour Name 
5255*5113495bSYour Name 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
5256*5113495bSYour Name 	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
5257*5113495bSYour Name 	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
5258*5113495bSYour Name 			       link_id);
5259*5113495bSYour Name 	DP_PEER_EXTD_STATS_INC(txrx_peer,
5260*5113495bSYour Name 			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1,
5261*5113495bSYour Name 			       link_id);
5262*5113495bSYour Name 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
5263*5113495bSYour Name 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
5264*5113495bSYour Name 	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
5265*5113495bSYour Name 				link_id);
5266*5113495bSYour Name 	if (ts->first_msdu) {
5267*5113495bSYour Name 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
5268*5113495bSYour Name 					ts->transmit_cnt > 1, link_id);
5269*5113495bSYour Name 
5270*5113495bSYour Name 		if (!retry_threshold)
5271*5113495bSYour Name 			return;
5272*5113495bSYour Name 		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
5273*5113495bSYour Name 					qdf_do_div(ts->transmit_cnt,
5274*5113495bSYour Name 						   retry_threshold),
5275*5113495bSYour Name 					ts->transmit_cnt > retry_threshold,
5276*5113495bSYour Name 					link_id);
5277*5113495bSYour Name 	}
5278*5113495bSYour Name }
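
/*
 * Retry accounting example (first MSDU only): with retry_threshold = 4
 * and ts->transmit_cnt = 9 the MPDU needed 8 retries, so
 * tx.retries_mpdu increments by 1 (transmit_cnt > 1) and
 * tx.mpdu_success_with_retries increments by 9 / 4 = 2
 * (transmit_cnt > retry_threshold).
 */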
5279*5113495bSYour Name #else
5280*5113495bSYour Name static inline void
5281*5113495bSYour Name dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
5282*5113495bSYour Name 			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
5283*5113495bSYour Name {
5284*5113495bSYour Name }
5285*5113495bSYour Name #endif
5286*5113495bSYour Name 
5287*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && \
5288*5113495bSYour Name 	(defined(QCA_ENHANCED_STATS_SUPPORT) || \
5289*5113495bSYour Name 		defined(DP_MLO_LINK_STATS_SUPPORT))
5290*5113495bSYour Name static inline uint8_t
5291*5113495bSYour Name dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
5292*5113495bSYour Name 			       struct hal_tx_completion_status *ts,
5293*5113495bSYour Name 			       struct dp_txrx_peer *txrx_peer,
5294*5113495bSYour Name 			       struct dp_vdev *vdev)
5295*5113495bSYour Name {
5296*5113495bSYour Name 	uint8_t hw_link_id = 0;
5297*5113495bSYour Name 	uint32_t ppdu_id;
5298*5113495bSYour Name 	uint8_t link_id_offset, link_id_bits;
5299*5113495bSYour Name 
5300*5113495bSYour Name 	if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
5301*5113495bSYour Name 		return 0;
5302*5113495bSYour Name 
5303*5113495bSYour Name 	link_id_offset = soc->link_id_offset;
5304*5113495bSYour Name 	link_id_bits = soc->link_id_bits;
5305*5113495bSYour Name 	ppdu_id = ts->ppdu_id;
5306*5113495bSYour Name 	hw_link_id = ((DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, link_id_offset,
5307*5113495bSYour Name 						     link_id_bits)) + 1);
5308*5113495bSYour Name 	if (hw_link_id > DP_MAX_MLO_LINKS) {
5309*5113495bSYour Name 		hw_link_id = 0;
5310*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(
5311*5113495bSYour Name 				txrx_peer,
5312*5113495bSYour Name 				tx.inval_link_id_pkt_cnt, 1, hw_link_id);
5313*5113495bSYour Name 	}
5314*5113495bSYour Name 
5315*5113495bSYour Name 	return hw_link_id;
5316*5113495bSYour Name }
5317*5113495bSYour Name #else
5318*5113495bSYour Name static inline uint8_t
5319*5113495bSYour Name dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
5320*5113495bSYour Name 			       struct hal_tx_completion_status *ts,
5321*5113495bSYour Name 			       struct dp_txrx_peer *txrx_peer,
5322*5113495bSYour Name 			       struct dp_vdev *vdev)
5323*5113495bSYour Name {
5324*5113495bSYour Name 	return 0;
5325*5113495bSYour Name }
5326*5113495bSYour Name #endif
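
/*
 * Illustrative sketch: dp_tx_get_link_id_from_ppdu_id() assumes the HW
 * link id is a small bit-field inside the PPDU id. The shift-and-mask
 * below is an assumption about what DP_GET_HW_LINK_ID_FRM_PPDU_ID()
 * expands to, shown only to make the +1 and the bounds check concrete.
 */
static uint8_t dp_tx_hw_link_id_sketch(uint32_t ppdu_id, uint8_t offset,
				       uint8_t bits)
{
	/* e.g. offset = 16, bits = 2: bits [17:16] of ppdu_id */
	uint8_t hw_link_id = ((ppdu_id >> offset) & ((1 << bits) - 1)) + 1;

	/* 0 is reserved to mean "invalid/unknown link" */
	return (hw_link_id > DP_MAX_MLO_LINKS) ? 0 : hw_link_id;
}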
5327*5113495bSYour Name 
5328*5113495bSYour Name /**
5329*5113495bSYour Name  * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
5330*5113495bSYour Name  *				per wbm ring
5331*5113495bSYour Name  *
5332*5113495bSYour Name  * @tx_desc: software descriptor head pointer
5333*5113495bSYour Name  * @ts: Tx completion status
5334*5113495bSYour Name  * @txrx_peer: peer handle
5335*5113495bSYour Name  * @ring_id: ring number
5336*5113495bSYour Name  * @link_id: Link id
5337*5113495bSYour Name  *
5338*5113495bSYour Name  * Return: None
5339*5113495bSYour Name  */
5340*5113495bSYour Name static inline void
5341*5113495bSYour Name dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
5342*5113495bSYour Name 			struct hal_tx_completion_status *ts,
5343*5113495bSYour Name 			struct dp_txrx_peer *txrx_peer, uint8_t ring_id,
5344*5113495bSYour Name 			uint8_t link_id)
5345*5113495bSYour Name {
5346*5113495bSYour Name 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
5347*5113495bSYour Name 	uint8_t tid = ts->tid;
5348*5113495bSYour Name 	uint32_t length;
5349*5113495bSYour Name 	struct cdp_tid_tx_stats *tid_stats;
5350*5113495bSYour Name 
5351*5113495bSYour Name 	if (!pdev)
5352*5113495bSYour Name 		return;
5353*5113495bSYour Name 
5354*5113495bSYour Name 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
5355*5113495bSYour Name 		tid = CDP_MAX_DATA_TIDS - 1;
5356*5113495bSYour Name 
5357*5113495bSYour Name 	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
5358*5113495bSYour Name 
5359*5113495bSYour Name 	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
5360*5113495bSYour Name 		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
5361*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
5362*5113495bSYour Name 					  link_id);
5363*5113495bSYour Name 		return;
5364*5113495bSYour Name 	}
5365*5113495bSYour Name 
5366*5113495bSYour Name 	length = qdf_nbuf_len(tx_desc->nbuf);
5367*5113495bSYour Name 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5368*5113495bSYour Name 
5369*5113495bSYour Name 	if (qdf_unlikely(pdev->delay_stats_flag) ||
5370*5113495bSYour Name 	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
5371*5113495bSYour Name 		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
5372*5113495bSYour Name 
5373*5113495bSYour Name 	if (ts->status < CDP_MAX_TX_TQM_STATUS)
5374*5113495bSYour Name 		tid_stats->tqm_status_cnt[ts->status]++;
5376*5113495bSYour Name 
5377*5113495bSYour Name 	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
5378*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
5379*5113495bSYour Name 					   ts->transmit_cnt > 1, link_id);
5380*5113495bSYour Name 
5381*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
5382*5113495bSYour Name 					   1, ts->transmit_cnt > 2, link_id);
5383*5113495bSYour Name 
5384*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
5385*5113495bSYour Name 					   link_id);
5386*5113495bSYour Name 
5387*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
5388*5113495bSYour Name 					   ts->msdu_part_of_amsdu, link_id);
5389*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
5390*5113495bSYour Name 					   !ts->msdu_part_of_amsdu, link_id);
5391*5113495bSYour Name 
5392*5113495bSYour Name 		txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
5393*5113495bSYour Name 							qdf_system_ticks();
5394*5113495bSYour Name 
5395*5113495bSYour Name 		dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);
5396*5113495bSYour Name 
5397*5113495bSYour Name 		return;
5398*5113495bSYour Name 	}
5399*5113495bSYour Name 
5400*5113495bSYour Name 	/*
5401*5113495bSYour Name 	 * tx_failed is ideally supposed to be updated from HTT ppdu
5402*5113495bSYour Name 	 * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
5403*5113495bSYour Name 	 * hw limitation there are no completions for failed cases.
5404*5113495bSYour Name 	 * Hence updating tx_failed from data path. Please note that
5405*5113495bSYour Name 	 * if tx_failed is fixed to be from ppdu, then this has to be
5406*5113495bSYour Name 	 * removed
5407*5113495bSYour Name 	 */
5408*5113495bSYour Name 	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5409*5113495bSYour Name 
5410*5113495bSYour Name 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
5411*5113495bSYour Name 				   ts->transmit_cnt > DP_RETRY_COUNT,
5412*5113495bSYour Name 				   link_id);
5413*5113495bSYour Name 	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);
5414*5113495bSYour Name 
5415*5113495bSYour Name 	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
5416*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
5417*5113495bSYour Name 					  link_id);
5418*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
5419*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
5420*5113495bSYour Name 					      length, link_id);
5421*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
5422*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
5423*5113495bSYour Name 					  link_id);
5424*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
5425*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
5426*5113495bSYour Name 					  link_id);
5427*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
5428*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
5429*5113495bSYour Name 					  link_id);
5430*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
5431*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
5432*5113495bSYour Name 					  link_id);
5433*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
5434*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
5435*5113495bSYour Name 					  link_id);
5436*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
5437*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5438*5113495bSYour Name 					  tx.dropped.fw_rem_queue_disable, 1,
5439*5113495bSYour Name 					  link_id);
5440*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
5441*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5442*5113495bSYour Name 					  tx.dropped.fw_rem_no_match, 1,
5443*5113495bSYour Name 					  link_id);
5444*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
5445*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5446*5113495bSYour Name 					  tx.dropped.drop_threshold, 1,
5447*5113495bSYour Name 					  link_id);
5448*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
5449*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5450*5113495bSYour Name 					  tx.dropped.drop_link_desc_na, 1,
5451*5113495bSYour Name 					  link_id);
5452*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
5453*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5454*5113495bSYour Name 					  tx.dropped.invalid_drop, 1,
5455*5113495bSYour Name 					  link_id);
5456*5113495bSYour Name 	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
5457*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5458*5113495bSYour Name 					  tx.dropped.mcast_vdev_drop, 1,
5459*5113495bSYour Name 					  link_id);
5460*5113495bSYour Name 	} else {
5461*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
5462*5113495bSYour Name 					  link_id);
5463*5113495bSYour Name 	}
5464*5113495bSYour Name }
5465*5113495bSYour Name 
5466*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5467*5113495bSYour Name /**
5468*5113495bSYour Name  * dp_tx_flow_pool_lock() - take flow pool lock
5469*5113495bSYour Name  * @soc: core txrx main context
5470*5113495bSYour Name  * @tx_desc: tx desc
5471*5113495bSYour Name  *
5472*5113495bSYour Name  * Return: None
5473*5113495bSYour Name  */
5474*5113495bSYour Name static inline
5475*5113495bSYour Name void dp_tx_flow_pool_lock(struct dp_soc *soc,
5476*5113495bSYour Name 			  struct dp_tx_desc_s *tx_desc)
5477*5113495bSYour Name {
5478*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool;
5479*5113495bSYour Name 	uint8_t desc_pool_id;
5480*5113495bSYour Name 
5481*5113495bSYour Name 	desc_pool_id = tx_desc->pool_id;
5482*5113495bSYour Name 	pool = &soc->tx_desc[desc_pool_id];
5483*5113495bSYour Name 
5484*5113495bSYour Name 	qdf_spin_lock_bh(&pool->flow_pool_lock);
5485*5113495bSYour Name }
5486*5113495bSYour Name 
5487*5113495bSYour Name /**
5488*5113495bSYour Name  * dp_tx_flow_pool_unlock() - release flow pool lock
5489*5113495bSYour Name  * @soc: core txrx main context
5490*5113495bSYour Name  * @tx_desc: tx desc
5491*5113495bSYour Name  *
5492*5113495bSYour Name  * Return: None
5493*5113495bSYour Name  */
5494*5113495bSYour Name static inline
5495*5113495bSYour Name void dp_tx_flow_pool_unlock(struct dp_soc *soc,
5496*5113495bSYour Name 			    struct dp_tx_desc_s *tx_desc)
5497*5113495bSYour Name {
5498*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool;
5499*5113495bSYour Name 	uint8_t desc_pool_id;
5500*5113495bSYour Name 
5501*5113495bSYour Name 	desc_pool_id = tx_desc->pool_id;
5502*5113495bSYour Name 	pool = &soc->tx_desc[desc_pool_id];
5503*5113495bSYour Name 
5504*5113495bSYour Name 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
5505*5113495bSYour Name }
5506*5113495bSYour Name #else
5507*5113495bSYour Name static inline
5508*5113495bSYour Name void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
5509*5113495bSYour Name {
5510*5113495bSYour Name }
5511*5113495bSYour Name 
5512*5113495bSYour Name static inline
5513*5113495bSYour Name void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
5514*5113495bSYour Name {
5515*5113495bSYour Name }
5516*5113495bSYour Name #endif
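
/*
 * Usage sketch: callers bracket pool-protected descriptor updates with
 * the pair above, which compiles to a no-op when QCA_LL_TX_FLOW_CONTROL_V2
 * is disabled. This helper is illustrative only and is not called anywhere.
 */
static void dp_tx_desc_update_under_pool_lock(struct dp_soc *soc,
					      struct dp_tx_desc_s *tx_desc)
{
	dp_tx_flow_pool_lock(soc, tx_desc);
	/* ...modify descriptor/pool state guarded by flow_pool_lock... */
	dp_tx_flow_pool_unlock(soc, tx_desc);
}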
5517*5113495bSYour Name 
5518*5113495bSYour Name /**
5519*5113495bSYour Name  * dp_tx_notify_completion() - Notify tx completion for this desc
5520*5113495bSYour Name  * @soc: core txrx main context
5521*5113495bSYour Name  * @vdev: datapath vdev handle
5522*5113495bSYour Name  * @tx_desc: tx desc
5523*5113495bSYour Name  * @netbuf: buffer
5524*5113495bSYour Name  * @status: tx status
5525*5113495bSYour Name  *
5526*5113495bSYour Name  * Return: none
5527*5113495bSYour Name  */
5528*5113495bSYour Name static inline void dp_tx_notify_completion(struct dp_soc *soc,
5529*5113495bSYour Name 					   struct dp_vdev *vdev,
5530*5113495bSYour Name 					   struct dp_tx_desc_s *tx_desc,
5531*5113495bSYour Name 					   qdf_nbuf_t netbuf,
5532*5113495bSYour Name 					   uint8_t status)
5533*5113495bSYour Name {
5534*5113495bSYour Name 	void *osif_dev;
5535*5113495bSYour Name 	ol_txrx_completion_fp tx_compl_cbk = NULL;
5536*5113495bSYour Name 	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
5537*5113495bSYour Name 
5538*5113495bSYour Name 	qdf_assert(tx_desc);
5539*5113495bSYour Name 
5540*5113495bSYour Name 	if (!vdev ||
5541*5113495bSYour Name 	    !vdev->osif_vdev) {
5542*5113495bSYour Name 		return;
5543*5113495bSYour Name 	}
5544*5113495bSYour Name 
5545*5113495bSYour Name 	osif_dev = vdev->osif_vdev;
5546*5113495bSYour Name 	tx_compl_cbk = vdev->tx_comp;
5547*5113495bSYour Name 
5548*5113495bSYour Name 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5549*5113495bSYour Name 		flag |= BIT(QDF_TX_RX_STATUS_OK);
5550*5113495bSYour Name 
5551*5113495bSYour Name 	if (tx_compl_cbk)
5552*5113495bSYour Name 		tx_compl_cbk(netbuf, osif_dev, flag);
5553*5113495bSYour Name }
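
/*
 * Illustrative sketch of the consumer side: an OSIF callback of type
 * ol_txrx_completion_fp can decode the flag bits set above. The body is
 * an assumption for illustration, not an actual HDD/OSIF handler.
 */
static void osif_tx_comp_cbk_sketch(qdf_nbuf_t netbuf, void *osif_dev,
				    uint16_t flag)
{
	if (flag & BIT(QDF_TX_RX_STATUS_OK)) {
		/* frame reached the target and was acked over the air */
	} else if (flag & BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC)) {
		/* frame reached the target but no OTA ack was seen */
	}
}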
5554*5113495bSYour Name 
5555*5113495bSYour Name /**
5556*5113495bSYour Name  * dp_tx_sojourn_stats_process() - Collect sojourn stats
5557*5113495bSYour Name  * @pdev: pdev handle
5558*5113495bSYour Name  * @txrx_peer: DP peer context
5559*5113495bSYour Name  * @tid: tid value
5560*5113495bSYour Name  * @txdesc_ts: timestamp from txdesc
5561*5113495bSYour Name  * @ppdu_id: ppdu id
5562*5113495bSYour Name  * @link_id: link id
5563*5113495bSYour Name  *
5564*5113495bSYour Name  * Return: none
5565*5113495bSYour Name  */
5566*5113495bSYour Name #ifdef FEATURE_PERPKT_INFO
5567*5113495bSYour Name static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
5568*5113495bSYour Name 					       struct dp_txrx_peer *txrx_peer,
5569*5113495bSYour Name 					       uint8_t tid,
5570*5113495bSYour Name 					       uint64_t txdesc_ts,
5571*5113495bSYour Name 					       uint32_t ppdu_id,
5572*5113495bSYour Name 					       uint8_t link_id)
5573*5113495bSYour Name {
5574*5113495bSYour Name 	uint64_t delta_ms;
5575*5113495bSYour Name 	struct cdp_tx_sojourn_stats *sojourn_stats;
5576*5113495bSYour Name 	struct dp_peer *primary_link_peer = NULL;
5577*5113495bSYour Name 	struct dp_soc *link_peer_soc = NULL;
5578*5113495bSYour Name 
5579*5113495bSYour Name 	if (qdf_unlikely(!pdev->enhanced_stats_en))
5580*5113495bSYour Name 		return;
5581*5113495bSYour Name 
5582*5113495bSYour Name 	if (qdf_unlikely(tid == HTT_INVALID_TID ||
5583*5113495bSYour Name 			 tid >= CDP_DATA_TID_MAX))
5584*5113495bSYour Name 		return;
5585*5113495bSYour Name 
5586*5113495bSYour Name 	if (qdf_unlikely(!pdev->sojourn_buf))
5587*5113495bSYour Name 		return;
5588*5113495bSYour Name 
5589*5113495bSYour Name 	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
5590*5113495bSYour Name 							   txrx_peer->peer_id,
5591*5113495bSYour Name 							   DP_MOD_ID_TX_COMP);
5592*5113495bSYour Name 
5593*5113495bSYour Name 	if (qdf_unlikely(!primary_link_peer))
5594*5113495bSYour Name 		return;
5595*5113495bSYour Name 
5596*5113495bSYour Name 	sojourn_stats = (struct cdp_tx_sojourn_stats *)
5597*5113495bSYour Name 		qdf_nbuf_data(pdev->sojourn_buf);
5598*5113495bSYour Name 
5599*5113495bSYour Name 	link_peer_soc = primary_link_peer->vdev->pdev->soc;
5600*5113495bSYour Name 	sojourn_stats->cookie = (void *)
5601*5113495bSYour Name 			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
5602*5113495bSYour Name 							  primary_link_peer);
5603*5113495bSYour Name 
5604*5113495bSYour Name 	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
5605*5113495bSYour Name 				txdesc_ts;
5606*5113495bSYour Name 	qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].per_pkt_stats.tx.avg_sojourn_msdu[tid],
5607*5113495bSYour Name 			    delta_ms);
5608*5113495bSYour Name 	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
5609*5113495bSYour Name 	sojourn_stats->num_msdus[tid] = 1;
5610*5113495bSYour Name 	sojourn_stats->avg_sojourn_msdu[tid].internal =
5611*5113495bSYour Name 		txrx_peer->stats[link_id].
5612*5113495bSYour Name 			per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
5613*5113495bSYour Name 	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
5614*5113495bSYour Name 			     pdev->sojourn_buf, HTT_INVALID_PEER,
5615*5113495bSYour Name 			     WDI_NO_VAL, pdev->pdev_id);
5616*5113495bSYour Name 	sojourn_stats->sum_sojourn_msdu[tid] = 0;
5617*5113495bSYour Name 	sojourn_stats->num_msdus[tid] = 0;
5618*5113495bSYour Name 	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
5619*5113495bSYour Name 
5620*5113495bSYour Name 	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
5621*5113495bSYour Name }
5622*5113495bSYour Name #else
5623*5113495bSYour Name static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
5624*5113495bSYour Name 					       struct dp_txrx_peer *txrx_peer,
5625*5113495bSYour Name 					       uint8_t tid,
5626*5113495bSYour Name 					       uint64_t txdesc_ts,
5627*5113495bSYour Name 					       uint32_t ppdu_id, uint8_t link_id)
5628*5113495bSYour Name {
5629*5113495bSYour Name }
5630*5113495bSYour Name #endif
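
/*
 * Conceptual sketch of the averaging qdf_ewma_tx_lag_add() performs on
 * avg_sojourn_msdu: an exponentially weighted moving average. The 1/8
 * weight is an assumption for this example; the real QDF object keeps
 * its state in fixed point internally.
 */
static uint32_t dp_tx_sojourn_ewma_step_sketch(uint32_t avg_ms,
					       uint32_t sample_ms)
{
	/* new_avg = old_avg + (sample - old_avg) / 8 */
	return avg_ms ? avg_ms + (int32_t)(sample_ms - avg_ms) / 8
		      : sample_ms;
}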
5631*5113495bSYour Name 
5632*5113495bSYour Name #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
5633*5113495bSYour Name void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
5634*5113495bSYour Name 				       struct dp_tx_desc_s *desc,
5635*5113495bSYour Name 				       struct hal_tx_completion_status *ts)
5636*5113495bSYour Name {
5637*5113495bSYour Name 	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
5638*5113495bSYour Name 			     desc, ts->peer_id,
5639*5113495bSYour Name 			     WDI_NO_VAL, desc->pdev->pdev_id);
5640*5113495bSYour Name }
5641*5113495bSYour Name #endif
5642*5113495bSYour Name 
5643*5113495bSYour Name void
5644*5113495bSYour Name dp_tx_comp_process_desc(struct dp_soc *soc,
5645*5113495bSYour Name 			struct dp_tx_desc_s *desc,
5646*5113495bSYour Name 			struct hal_tx_completion_status *ts,
5647*5113495bSYour Name 			struct dp_txrx_peer *txrx_peer)
5648*5113495bSYour Name {
5649*5113495bSYour Name 	uint64_t time_latency = 0;
5650*5113495bSYour Name 	uint16_t peer_id = DP_INVALID_PEER_ID;
5651*5113495bSYour Name 
5652*5113495bSYour Name 	/*
5653*5113495bSYour Name 	 * m_copy/tx_capture modes are not supported for
5654*5113495bSYour Name 	 * scatter gather packets
5655*5113495bSYour Name 	 */
5656*5113495bSYour Name 	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
5657*5113495bSYour Name 		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
5658*5113495bSYour Name 				qdf_ktime_to_ms(desc->timestamp));
5659*5113495bSYour Name 	}
5660*5113495bSYour Name 
5661*5113495bSYour Name 	dp_send_completion_to_pkt_capture(soc, desc, ts);
5662*5113495bSYour Name 
5663*5113495bSYour Name 	if (dp_tx_pkt_tracepoints_enabled())
5664*5113495bSYour Name 		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
5665*5113495bSYour Name 				    desc->msdu_ext_desc ?
5666*5113495bSYour Name 				    desc->msdu_ext_desc->tso_desc : NULL,
5667*5113495bSYour Name 				    qdf_ktime_to_us(desc->timestamp));
5668*5113495bSYour Name 
5669*5113495bSYour Name 	if (!(desc->msdu_ext_desc)) {
5670*5113495bSYour Name 		dp_tx_enh_unmap(soc, desc);
5671*5113495bSYour Name 		if (txrx_peer)
5672*5113495bSYour Name 			peer_id = txrx_peer->peer_id;
5673*5113495bSYour Name 
5674*5113495bSYour Name 		if (QDF_STATUS_SUCCESS ==
5675*5113495bSYour Name 		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
5676*5113495bSYour Name 			return;
5677*5113495bSYour Name 		}
5678*5113495bSYour Name 
5679*5113495bSYour Name 		if (QDF_STATUS_SUCCESS ==
5680*5113495bSYour Name 		    dp_get_completion_indication_for_stack(soc,
5681*5113495bSYour Name 							   desc->pdev,
5682*5113495bSYour Name 							   txrx_peer, ts,
5683*5113495bSYour Name 							   desc->nbuf,
5684*5113495bSYour Name 							   time_latency)) {
5685*5113495bSYour Name 			dp_send_completion_to_stack(soc,
5686*5113495bSYour Name 						    desc->pdev,
5687*5113495bSYour Name 						    ts->peer_id,
5688*5113495bSYour Name 						    ts->ppdu_id,
5689*5113495bSYour Name 						    desc->nbuf);
5690*5113495bSYour Name 			return;
5691*5113495bSYour Name 		}
5692*5113495bSYour Name 	}
5693*5113495bSYour Name 
5694*5113495bSYour Name 	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
5695*5113495bSYour Name 	dp_tx_comp_free_buf(soc, desc, false);
5696*5113495bSYour Name }
5697*5113495bSYour Name 
5698*5113495bSYour Name #ifdef DISABLE_DP_STATS
5699*5113495bSYour Name /**
5700*5113495bSYour Name  * dp_tx_update_connectivity_stats() - update tx connectivity stats
5701*5113495bSYour Name  * @soc: core txrx main context
5702*5113495bSYour Name  * @vdev: virtual device instance
5703*5113495bSYour Name  * @tx_desc: tx desc
5704*5113495bSYour Name  * @status: tx status
5705*5113495bSYour Name  *
5706*5113495bSYour Name  * Return: none
5707*5113495bSYour Name  */
5708*5113495bSYour Name static inline
5709*5113495bSYour Name void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5710*5113495bSYour Name 				     struct dp_vdev *vdev,
5711*5113495bSYour Name 				     struct dp_tx_desc_s *tx_desc,
5712*5113495bSYour Name 				     uint8_t status)
5713*5113495bSYour Name {
5714*5113495bSYour Name }
5715*5113495bSYour Name #else
5716*5113495bSYour Name static inline
5717*5113495bSYour Name void dp_tx_update_connectivity_stats(struct dp_soc *soc,
5718*5113495bSYour Name 				     struct dp_vdev *vdev,
5719*5113495bSYour Name 				     struct dp_tx_desc_s *tx_desc,
5720*5113495bSYour Name 				     uint8_t status)
5721*5113495bSYour Name {
5722*5113495bSYour Name 	void *osif_dev;
5723*5113495bSYour Name 	ol_txrx_stats_rx_fp stats_cbk;
5724*5113495bSYour Name 	uint8_t pkt_type;
5725*5113495bSYour Name 
5726*5113495bSYour Name 	qdf_assert(tx_desc);
5727*5113495bSYour Name 
5728*5113495bSYour Name 	if (!vdev || vdev->delete.pending || !vdev->osif_vdev ||
5729*5113495bSYour Name 	    !vdev->stats_cb)
5730*5113495bSYour Name 		return;
5731*5113495bSYour Name 
5732*5113495bSYour Name 	osif_dev = vdev->osif_vdev;
5733*5113495bSYour Name 	stats_cbk = vdev->stats_cb;
5734*5113495bSYour Name 
5735*5113495bSYour Name 	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
5736*5113495bSYour Name 	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
5737*5113495bSYour Name 		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
5738*5113495bSYour Name 			  &pkt_type);
5739*5113495bSYour Name }
5740*5113495bSYour Name #endif
5741*5113495bSYour Name 
5742*5113495bSYour Name #if defined(WLAN_FEATURE_TSF_AUTO_REPORT) || defined(WLAN_CONFIG_TX_DELAY)
5743*5113495bSYour Name /* Mask for bit29 ~ bit31 */
5744*5113495bSYour Name #define DP_TX_TS_BIT29_31_MASK 0xE0000000
5745*5113495bSYour Name /* Timestamp value (unit us) if bit29 is set */
5746*5113495bSYour Name #define DP_TX_TS_BIT29_SET_VALUE BIT(29)
5747*5113495bSYour Name /**
5748*5113495bSYour Name  * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
5749*5113495bSYour Name  * @ack_ts: OTA ack timestamp, unit us.
5750*5113495bSYour Name  * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
5751*5113495bSYour Name  * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
5752*5113495bSYour Name  *
5753*5113495bSYour Name  * This function restores the 3-bit value of bit29 ~ bit31 for the
5754*5113495bSYour Name  * buffer_timestamp in the wbm2sw ring entry. buffer_timestamp can only
5755*5113495bSYour Name  * carry up to 0x7FFFF * 1024 us (29 bits); if the timestamp exceeds
5756*5113495bSYour Name  * that, bit29 ~ bit31 would otherwise be lost.
5757*5113495bSYour Name  *
5758*5113495bSYour Name  * Return: the adjusted buffer_timestamp value
5759*5113495bSYour Name  */
5760*5113495bSYour Name static inline
5761*5113495bSYour Name uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
5762*5113495bSYour Name 					uint32_t enqueue_ts,
5763*5113495bSYour Name 					uint32_t base_delta_ts)
5764*5113495bSYour Name {
5765*5113495bSYour Name 	uint32_t ack_buffer_ts;
5766*5113495bSYour Name 	uint32_t ack_buffer_ts_bit29_31;
5767*5113495bSYour Name 	uint32_t adjusted_enqueue_ts;
5768*5113495bSYour Name 
5769*5113495bSYour Name 	/* corresponding buffer_timestamp value when receive OTA Ack */
5770*5113495bSYour Name 	ack_buffer_ts = ack_ts - base_delta_ts;
5771*5113495bSYour Name 	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;
5772*5113495bSYour Name 
5773*5113495bSYour Name 	/* restore the bit29 ~ bit31 value */
5774*5113495bSYour Name 	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;
5775*5113495bSYour Name 
5776*5113495bSYour Name 	/*
5777*5113495bSYour Name 	 * if actual enqueue_ts value occupied 29 bits only, this enqueue_ts
5778*5113495bSYour Name 	 * value + real UL delay overflow 29 bits, then 30th bit (bit-29)
5779*5113495bSYour Name 	 * should not be marked, otherwise extra 0x20000000 us is added to
5780*5113495bSYour Name 	 * enqueue_ts.
5781*5113495bSYour Name 	 */
5782*5113495bSYour Name 	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
5783*5113495bSYour Name 		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;
5784*5113495bSYour Name 
5785*5113495bSYour Name 	return adjusted_enqueue_ts;
5786*5113495bSYour Name }
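
/*
 * Worked example with made-up values: suppose ack_ts - base_delta_ts =
 * 0x61000000 us while the ring kept only the low 29 bits of the enqueue
 * time, e.g. 0x00FF0000. Restoring bit29 ~ bit31 from the ack-side
 * timestamp yields 0x60FF0000, so the enqueue-to-ack delta computes
 * correctly; no bit29 correction is needed here since the result does
 * not exceed ack_buffer_ts.
 */
static uint32_t dp_tx_adjust_enqueue_ts_example(void)
{
	uint32_t ack_buffer_ts = 0x61000000;	/* ack_ts - base_delta_ts */
	uint32_t enqueue_ts = 0x00FF0000;	/* 29-bit truncated value */
	uint32_t hi_bits = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;

	return hi_bits | enqueue_ts;		/* 0x60FF0000 */
}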
5787*5113495bSYour Name 
5788*5113495bSYour Name QDF_STATUS
5789*5113495bSYour Name dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
5790*5113495bSYour Name 			  uint32_t delta_tsf,
5791*5113495bSYour Name 			  uint32_t *delay_us)
5792*5113495bSYour Name {
5793*5113495bSYour Name 	uint32_t buffer_ts;
5794*5113495bSYour Name 	uint32_t delay;
5795*5113495bSYour Name 
5796*5113495bSYour Name 	if (!delay_us)
5797*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5798*5113495bSYour Name 
5799*5113495bSYour Name 	/* If tx_rate_stats_info_valid is 0, the tsf in ts is invalid */
5800*5113495bSYour Name 	if (!ts->valid)
5801*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5802*5113495bSYour Name 
5803*5113495bSYour Name 	/* buffer_timestamp is in units of 1024 us and is [31:13] of
5804*5113495bSYour Name 	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
5805*5113495bSYour Name 	 * valid up to 29 bits.
5806*5113495bSYour Name 	 */
5807*5113495bSYour Name 	buffer_ts = ts->buffer_timestamp << 10;
5808*5113495bSYour Name 	buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
5809*5113495bSYour Name 						   buffer_ts, delta_tsf);
5810*5113495bSYour Name 
5811*5113495bSYour Name 	delay = ts->tsf - buffer_ts - delta_tsf;
5812*5113495bSYour Name 
5813*5113495bSYour Name 	if (qdf_unlikely(delay & 0x80000000)) {
5814*5113495bSYour Name 		dp_err_rl("delay = 0x%x (-ve)\n"
5815*5113495bSYour Name 			  "release_src = %d\n"
5816*5113495bSYour Name 			  "ppdu_id = 0x%x\n"
5817*5113495bSYour Name 			  "peer_id = 0x%x\n"
5818*5113495bSYour Name 			  "tid = 0x%x\n"
5819*5113495bSYour Name 			  "release_reason = %d\n"
5820*5113495bSYour Name 			  "tsf = %u (0x%x)\n"
5821*5113495bSYour Name 			  "buffer_timestamp = %u (0x%x)\n"
5822*5113495bSYour Name 			  "delta_tsf = %u (0x%x)\n",
5823*5113495bSYour Name 			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
5824*5113495bSYour Name 			  ts->tid, ts->status, ts->tsf, ts->tsf,
5825*5113495bSYour Name 			  ts->buffer_timestamp, ts->buffer_timestamp,
5826*5113495bSYour Name 			  delta_tsf, delta_tsf);
5827*5113495bSYour Name 
5828*5113495bSYour Name 		delay = 0;
5829*5113495bSYour Name 		goto end;
5830*5113495bSYour Name 	}
5831*5113495bSYour Name 
5832*5113495bSYour Name 	delay &= 0x1FFFFFFF; /* mask 29 BITS */
5833*5113495bSYour Name 	if (delay > 0x1000000) {
5834*5113495bSYour Name 		dp_info_rl("----------------------\n"
5835*5113495bSYour Name 			   "Tx completion status:\n"
5836*5113495bSYour Name 			   "----------------------\n"
5837*5113495bSYour Name 			   "release_src = %d\n"
5838*5113495bSYour Name 			   "ppdu_id = 0x%x\n"
5839*5113495bSYour Name 			   "release_reason = %d\n"
5840*5113495bSYour Name 			   "tsf = %u (0x%x)\n"
5841*5113495bSYour Name 			   "buffer_timestamp = %u (0x%x)\n"
5842*5113495bSYour Name 			   "delta_tsf = %u (0x%x)\n",
5843*5113495bSYour Name 			   ts->release_src, ts->ppdu_id, ts->status,
5844*5113495bSYour Name 			   ts->tsf, ts->tsf, ts->buffer_timestamp,
5845*5113495bSYour Name 			   ts->buffer_timestamp, delta_tsf, delta_tsf);
5846*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5847*5113495bSYour Name 	}
5848*5113495bSYour Name 
5850*5113495bSYour Name end:
5851*5113495bSYour Name 	*delay_us = delay;
5852*5113495bSYour Name 
5853*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5854*5113495bSYour Name }
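
/*
 * Worked example of the delay math above, with made-up numbers: a
 * buffer_timestamp field of 0x2000 (1024 us units) shifted left by 10
 * gives an enqueue time of 0x00800000 us; with tsf = 0x00804000 us and
 * delta_tsf = 0x1000 us the HW delay is 0x4000 - 0x1000 = 0x3000 us
 * (~12.3 ms), well under the 0x1000000 us sanity limit.
 */
static uint32_t dp_tx_hw_delay_example(void)
{
	uint32_t buffer_ts = 0x2000 << 10;	/* 0x00800000 us */
	uint32_t tsf = 0x00804000;		/* us */
	uint32_t delta_tsf = 0x1000;		/* us */

	return tsf - buffer_ts - delta_tsf;	/* 0x3000 us */
}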
5855*5113495bSYour Name 
5856*5113495bSYour Name void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5857*5113495bSYour Name 		      uint32_t delta_tsf)
5858*5113495bSYour Name {
5859*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5860*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5861*5113495bSYour Name 						     DP_MOD_ID_CDP);
5862*5113495bSYour Name 
5863*5113495bSYour Name 	if (!vdev) {
5864*5113495bSYour Name 		dp_err_rl("vdev %d does not exist", vdev_id);
5865*5113495bSYour Name 		return;
5866*5113495bSYour Name 	}
5867*5113495bSYour Name 
5868*5113495bSYour Name 	vdev->delta_tsf = delta_tsf;
5869*5113495bSYour Name 	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
5870*5113495bSYour Name 
5871*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5872*5113495bSYour Name }
5873*5113495bSYour Name #endif
5874*5113495bSYour Name #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
5875*5113495bSYour Name QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
5876*5113495bSYour Name 				      uint8_t vdev_id, bool enable)
5877*5113495bSYour Name {
5878*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5879*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5880*5113495bSYour Name 						     DP_MOD_ID_CDP);
5881*5113495bSYour Name 
5882*5113495bSYour Name 	if (!vdev) {
5883*5113495bSYour Name 		dp_err_rl("vdev %d does not exist", vdev_id);
5884*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5885*5113495bSYour Name 	}
5886*5113495bSYour Name 
5887*5113495bSYour Name 	qdf_atomic_set(&vdev->ul_delay_report, enable);
5888*5113495bSYour Name 
5889*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5890*5113495bSYour Name 
5891*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5892*5113495bSYour Name }
5893*5113495bSYour Name 
5894*5113495bSYour Name QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5895*5113495bSYour Name 			       uint32_t *val)
5896*5113495bSYour Name {
5897*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5898*5113495bSYour Name 	struct dp_vdev *vdev;
5899*5113495bSYour Name 	uint32_t delay_accum;
5900*5113495bSYour Name 	uint32_t pkts_accum;
5901*5113495bSYour Name 
5902*5113495bSYour Name 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5903*5113495bSYour Name 	if (!vdev) {
5904*5113495bSYour Name 		dp_err_rl("vdev %d does not exist", vdev_id);
5905*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5906*5113495bSYour Name 	}
5907*5113495bSYour Name 
5908*5113495bSYour Name 	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
5909*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5910*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5911*5113495bSYour Name 	}
5912*5113495bSYour Name 
5913*5113495bSYour Name 	/* Average uplink delay based on current accumulated values */
5914*5113495bSYour Name 	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
5915*5113495bSYour Name 	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
5916*5113495bSYour Name 
5917*5113495bSYour Name 	*val = pkts_accum ? (delay_accum / pkts_accum) : 0;
5918*5113495bSYour Name 	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
5919*5113495bSYour Name 		 delay_accum, pkts_accum);
5920*5113495bSYour Name 
5921*5113495bSYour Name 	/* Reset accumulated values to 0 */
5922*5113495bSYour Name 	qdf_atomic_set(&vdev->ul_delay_accum, 0);
5923*5113495bSYour Name 	qdf_atomic_set(&vdev->ul_pkts_accum, 0);
5924*5113495bSYour Name 
5925*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5926*5113495bSYour Name 
5927*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5928*5113495bSYour Name }
5929*5113495bSYour Name 
5930*5113495bSYour Name static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5931*5113495bSYour Name 				      struct hal_tx_completion_status *ts)
5932*5113495bSYour Name {
5933*5113495bSYour Name 	uint32_t ul_delay;
5934*5113495bSYour Name 
5935*5113495bSYour Name 	if (qdf_unlikely(!vdev)) {
5936*5113495bSYour Name 		dp_info_rl("vdev is null or deletion in progress");
5937*5113495bSYour Name 		return;
5938*5113495bSYour Name 	}
5939*5113495bSYour Name 
5940*5113495bSYour Name 	if (!qdf_atomic_read(&vdev->ul_delay_report))
5941*5113495bSYour Name 		return;
5942*5113495bSYour Name 
5943*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
5944*5113495bSYour Name 							  vdev->delta_tsf,
5945*5113495bSYour Name 							  &ul_delay)))
5946*5113495bSYour Name 		return;
5947*5113495bSYour Name 
5948*5113495bSYour Name 	ul_delay /= 1000; /* in unit of ms */
5949*5113495bSYour Name 
5950*5113495bSYour Name 	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
5951*5113495bSYour Name 	qdf_atomic_inc(&vdev->ul_pkts_accum);
5952*5113495bSYour Name }
5953*5113495bSYour Name #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
5954*5113495bSYour Name static inline
5955*5113495bSYour Name void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
5956*5113495bSYour Name 			       struct hal_tx_completion_status *ts)
5957*5113495bSYour Name {
5958*5113495bSYour Name }
5959*5113495bSYour Name #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
5960*5113495bSYour Name 
5961*5113495bSYour Name #ifndef CONFIG_AP_PLATFORM
5962*5113495bSYour Name /**
5963*5113495bSYour Name  * dp_update_mcast_stats() - Update Tx Mcast stats
5964*5113495bSYour Name  * @txrx_peer: txrx_peer pointer
5965*5113495bSYour Name  * @link_id: Link ID
5966*5113495bSYour Name  * @length: packet length
5967*5113495bSYour Name  * @nbuf: nbuf handle
5968*5113495bSYour Name  *
5969*5113495bSYour Name  * Return: None
5970*5113495bSYour Name  */
5971*5113495bSYour Name static inline void
5972*5113495bSYour Name dp_update_mcast_stats(struct dp_txrx_peer *txrx_peer, uint8_t link_id,
5973*5113495bSYour Name 		      uint32_t length, qdf_nbuf_t nbuf)
5974*5113495bSYour Name {
5975*5113495bSYour Name 	if (QDF_NBUF_CB_GET_IS_MCAST(nbuf))
5976*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5977*5113495bSYour Name 					      length, link_id);
5978*5113495bSYour Name }
5979*5113495bSYour Name #else
5980*5113495bSYour Name static inline void
5981*5113495bSYour Name dp_update_mcast_stats(struct dp_txrx_peer *txrx_peer, uint8_t link_id,
5982*5113495bSYour Name 		      uint32_t length, qdf_nbuf_t nbuf)
5983*5113495bSYour Name {
5984*5113495bSYour Name }
5985*5113495bSYour Name #endif
5986*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
5987*5113495bSYour Name /**
5988*5113495bSYour Name  * dp_tx_comp_set_nbuf_band() - set nbuf band.
5989*5113495bSYour Name  * @soc: dp soc handle
5990*5113495bSYour Name  * @nbuf: nbuf handle
5991*5113495bSYour Name  * @ts: tx completion status
5992*5113495bSYour Name  *
5993*5113495bSYour Name  * Return: None
5994*5113495bSYour Name  */
5995*5113495bSYour Name static inline void
5996*5113495bSYour Name dp_tx_comp_set_nbuf_band(struct dp_soc *soc, qdf_nbuf_t nbuf,
5997*5113495bSYour Name 			 struct hal_tx_completion_status *ts)
5998*5113495bSYour Name {
5999*5113495bSYour Name 	struct qdf_mac_addr *mac_addr;
6000*5113495bSYour Name 	struct dp_peer *peer;
6001*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
6002*5113495bSYour Name 	uint8_t link_id;
6003*5113495bSYour Name 
6004*5113495bSYour Name 	if ((QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) !=
6005*5113495bSYour Name 		QDF_NBUF_CB_PACKET_TYPE_EAPOL &&
6006*5113495bSYour Name 	     QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) !=
6007*5113495bSYour Name 		QDF_NBUF_CB_PACKET_TYPE_DHCP &&
6008*5113495bSYour Name 	     QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) !=
6009*5113495bSYour Name 		QDF_NBUF_CB_PACKET_TYPE_DHCPV6) ||
6010*5113495bSYour Name 	    QDF_NBUF_CB_GET_IS_BCAST(nbuf))
6011*5113495bSYour Name 		return;
6012*5113495bSYour Name 
6013*5113495bSYour Name 	mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) +
6014*5113495bSYour Name 					   QDF_NBUF_DEST_MAC_OFFSET);
6015*5113495bSYour Name 
6016*5113495bSYour Name 	peer = dp_mld_peer_find_hash_find(soc, mac_addr->bytes, 0,
6017*5113495bSYour Name 					  DP_VDEV_ALL, DP_MOD_ID_TX_COMP);
6018*5113495bSYour Name 	if (qdf_likely(peer)) {
6019*5113495bSYour Name 		txrx_peer = dp_get_txrx_peer(peer);
6020*5113495bSYour Name 		if (qdf_likely(txrx_peer)) {
6021*5113495bSYour Name 			link_id =
6022*5113495bSYour Name 				dp_tx_get_link_id_from_ppdu_id(soc, ts,
6023*5113495bSYour Name 						  txrx_peer,
6024*5113495bSYour Name 						  txrx_peer->vdev);
6025*5113495bSYour Name 			qdf_nbuf_tx_set_band(nbuf, txrx_peer->ll_band[link_id]);
6026*5113495bSYour Name 		}
6027*5113495bSYour Name 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
6028*5113495bSYour Name 	}
6029*5113495bSYour Name }
6030*5113495bSYour Name #else
6031*5113495bSYour Name static inline void
6032*5113495bSYour Name dp_tx_comp_set_nbuf_band(struct dp_soc *soc, qdf_nbuf_t nbuf,
6033*5113495bSYour Name 			 struct hal_tx_completion_status *ts)
6034*5113495bSYour Name {
6035*5113495bSYour Name }
6036*5113495bSYour Name #endif
6037*5113495bSYour Name 
6038*5113495bSYour Name void dp_tx_comp_process_tx_status(struct dp_soc *soc,
6039*5113495bSYour Name 				  struct dp_tx_desc_s *tx_desc,
6040*5113495bSYour Name 				  struct hal_tx_completion_status *ts,
6041*5113495bSYour Name 				  struct dp_txrx_peer *txrx_peer,
6042*5113495bSYour Name 				  uint8_t ring_id)
6043*5113495bSYour Name {
6044*5113495bSYour Name 	uint32_t length;
6045*5113495bSYour Name 	qdf_ether_header_t *eh;
6046*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
6047*5113495bSYour Name 	qdf_nbuf_t nbuf = tx_desc->nbuf;
6048*5113495bSYour Name 	enum qdf_dp_tx_rx_status dp_status;
6049*5113495bSYour Name 	uint8_t link_id = 0;
6050*5113495bSYour Name 	enum QDF_OPMODE op_mode = QDF_MAX_NO_OF_MODE;
6051*5113495bSYour Name 
6052*5113495bSYour Name 	if (!nbuf) {
6053*5113495bSYour Name 		dp_info_rl("invalid tx descriptor. nbuf NULL");
6054*5113495bSYour Name 		goto out;
6055*5113495bSYour Name 	}
6056*5113495bSYour Name 
6057*5113495bSYour Name 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
6058*5113495bSYour Name 	length = dp_tx_get_pkt_len(tx_desc);
6059*5113495bSYour Name 
6060*5113495bSYour Name 	dp_status = dp_tx_hw_to_qdf(ts->status);
6061*5113495bSYour Name 	if (soc->dp_debug_log_en) {
6062*5113495bSYour Name 		dp_tx_comp_debug("--------------------\n"
6063*5113495bSYour Name 				 "Tx Completion Stats:\n"
6064*5113495bSYour Name 				 "--------------------\n"
6065*5113495bSYour Name 				 "ack_frame_rssi = %d\n"
6066*5113495bSYour Name 				 "first_msdu = %d\n"
6067*5113495bSYour Name 				 "last_msdu = %d\n"
6068*5113495bSYour Name 				 "msdu_part_of_amsdu = %d\n"
6069*5113495bSYour Name 				 "rate_stats valid = %d\n"
6070*5113495bSYour Name 				 "bw = %d\n"
6071*5113495bSYour Name 				 "pkt_type = %d\n"
6072*5113495bSYour Name 				 "stbc = %d\n"
6073*5113495bSYour Name 				 "ldpc = %d\n"
6074*5113495bSYour Name 				 "sgi = %d\n"
6075*5113495bSYour Name 				 "mcs = %d\n"
6076*5113495bSYour Name 				 "ofdma = %d\n"
6077*5113495bSYour Name 				 "tones_in_ru = %d\n"
6078*5113495bSYour Name 				 "tsf = %d\n"
6079*5113495bSYour Name 				 "ppdu_id = %d\n"
6080*5113495bSYour Name 				 "transmit_cnt = %d\n"
6081*5113495bSYour Name 				 "tid = %d\n"
6082*5113495bSYour Name 				 "peer_id = %d\n"
6083*5113495bSYour Name 				 "tx_status = %d\n"
6084*5113495bSYour Name 				 "tx_release_source = %d\n",
6085*5113495bSYour Name 				 ts->ack_frame_rssi, ts->first_msdu,
6086*5113495bSYour Name 				 ts->last_msdu, ts->msdu_part_of_amsdu,
6087*5113495bSYour Name 				 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
6088*5113495bSYour Name 				 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
6089*5113495bSYour Name 				 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
6090*5113495bSYour Name 				 ts->transmit_cnt, ts->tid, ts->peer_id,
6091*5113495bSYour Name 				 ts->status, ts->release_src);
6092*5113495bSYour Name 	}
6093*5113495bSYour Name 
6094*5113495bSYour Name 	/* Update SoC level stats */
6095*5113495bSYour Name 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
6096*5113495bSYour Name 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
6097*5113495bSYour Name 
6098*5113495bSYour Name 	if (!txrx_peer) {
6099*5113495bSYour Name 		dp_tx_comp_set_nbuf_band(soc, nbuf, ts);
6100*5113495bSYour Name 		dp_info_rl("peer is null or deletion in progress");
6101*5113495bSYour Name 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
6102*5113495bSYour Name 
6103*5113495bSYour Name 		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
6104*5113495bSYour Name 					     DP_MOD_ID_CDP);
6105*5113495bSYour Name 		if (qdf_likely(vdev)) {
6106*5113495bSYour Name 			op_mode = vdev->qdf_opmode;
6107*5113495bSYour Name 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6108*5113495bSYour Name 		}
6109*5113495bSYour Name 
6110*5113495bSYour Name 		goto out_log;
6111*5113495bSYour Name 	}
6112*5113495bSYour Name 	vdev = txrx_peer->vdev;
6113*5113495bSYour Name 
6114*5113495bSYour Name 	link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);
6115*5113495bSYour Name 
6116*5113495bSYour Name 	dp_tx_set_nbuf_band(nbuf, txrx_peer, link_id);
6117*5113495bSYour Name 
6118*5113495bSYour Name 	op_mode = vdev->qdf_opmode;
6119*5113495bSYour Name 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
6120*5113495bSYour Name 	dp_tx_update_uplink_delay(soc, vdev, ts);
6121*5113495bSYour Name 
6122*5113495bSYour Name 	/* check tx complete notification */
6123*5113495bSYour Name 	if (qdf_nbuf_tx_notify_comp_get(nbuf))
6124*5113495bSYour Name 		dp_tx_notify_completion(soc, vdev, tx_desc,
6125*5113495bSYour Name 					nbuf, ts->status);
6126*5113495bSYour Name 
6127*5113495bSYour Name 	/* Update per-packet stats for mesh mode */
6128*5113495bSYour Name 	if (qdf_unlikely(vdev->mesh_vdev) &&
6129*5113495bSYour Name 			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
6130*5113495bSYour Name 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
6131*5113495bSYour Name 
6132*5113495bSYour Name 	/* Update peer level stats */
6133*5113495bSYour Name 	if (qdf_unlikely(txrx_peer->bss_peer &&
6134*5113495bSYour Name 			 vdev->opmode == wlan_op_mode_ap)) {
6135*5113495bSYour Name 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
6136*5113495bSYour Name 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
6137*5113495bSYour Name 						      length, link_id);
6138*5113495bSYour Name 
6139*5113495bSYour Name 			if (txrx_peer->vdev->tx_encap_type ==
6140*5113495bSYour Name 				htt_cmn_pkt_type_ethernet &&
6141*5113495bSYour Name 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
6142*5113495bSYour Name 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
6143*5113495bSYour Name 							      tx.bcast, 1,
6144*5113495bSYour Name 							      length, link_id);
6145*5113495bSYour Name 			}
6146*5113495bSYour Name 		}
6147*5113495bSYour Name 	} else {
6148*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
6149*5113495bSYour Name 					      link_id);
6150*5113495bSYour Name 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
6151*5113495bSYour Name 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
6152*5113495bSYour Name 						      1, length, link_id);
6153*5113495bSYour Name 			if (qdf_unlikely(txrx_peer->in_twt)) {
6154*5113495bSYour Name 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
6155*5113495bSYour Name 							      tx.tx_success_twt,
6156*5113495bSYour Name 							      1, length,
6157*5113495bSYour Name 							      link_id);
6158*5113495bSYour Name 			}
6159*5113495bSYour Name 
6160*5113495bSYour Name 			dp_update_mcast_stats(txrx_peer, link_id, length, nbuf);
6161*5113495bSYour Name 		}
6162*5113495bSYour Name 	}
6163*5113495bSYour Name 
6164*5113495bSYour Name 	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
6165*5113495bSYour Name 	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
6166*5113495bSYour Name 	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
6167*5113495bSYour Name 	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
6168*5113495bSYour Name 				     ts, ts->tid);
6169*5113495bSYour Name 	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
6170*5113495bSYour Name 	dp_tx_latency_stats_update(soc, txrx_peer, tx_desc, ts, link_id);
6171*5113495bSYour Name 
6172*5113495bSYour Name #ifdef QCA_SUPPORT_RDK_STATS
6173*5113495bSYour Name 	if (soc->peerstats_enabled)
6174*5113495bSYour Name 		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
6175*5113495bSYour Name 					    qdf_ktime_to_ms(tx_desc->timestamp),
6176*5113495bSYour Name 					    ts->ppdu_id, link_id);
6177*5113495bSYour Name #endif
6178*5113495bSYour Name 
6179*5113495bSYour Name out_log:
6180*5113495bSYour Name 	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
6181*5113495bSYour Name 			 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
6182*5113495bSYour Name 			 QDF_TRACE_DEFAULT_PDEV_ID,
6183*5113495bSYour Name 			 qdf_nbuf_data_addr(nbuf),
6184*5113495bSYour Name 			 sizeof(qdf_nbuf_data(nbuf)),
6185*5113495bSYour Name 			 tx_desc->id, ts->status, dp_status, op_mode));
6186*5113495bSYour Name out:
6187*5113495bSYour Name 	return;
6188*5113495bSYour Name }
6189*5113495bSYour Name 
6190*5113495bSYour Name #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
6191*5113495bSYour Name 	defined(QCA_ENHANCED_STATS_SUPPORT)
6192*5113495bSYour Name void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
6193*5113495bSYour Name 				   uint32_t length, uint8_t tx_status,
6194*5113495bSYour Name 				   bool update)
6195*5113495bSYour Name {
6196*5113495bSYour Name 	if (update || (!txrx_peer->hw_txrx_stats_en)) {
6197*5113495bSYour Name 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6198*5113495bSYour Name 
6199*5113495bSYour Name 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
6200*5113495bSYour Name 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6201*5113495bSYour Name 	}
6202*5113495bSYour Name }
6203*5113495bSYour Name #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
6204*5113495bSYour Name void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
6205*5113495bSYour Name 				   uint32_t length, uint8_t tx_status,
6206*5113495bSYour Name 				   bool update)
6207*5113495bSYour Name {
6208*5113495bSYour Name 	if (!txrx_peer->hw_txrx_stats_en) {
6209*5113495bSYour Name 		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6210*5113495bSYour Name 
6211*5113495bSYour Name 		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
6212*5113495bSYour Name 			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6213*5113495bSYour Name 	}
6214*5113495bSYour Name }
6215*5113495bSYour Name 
6216*5113495bSYour Name #else
6217*5113495bSYour Name void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
6218*5113495bSYour Name 				   uint32_t length, uint8_t tx_status,
6219*5113495bSYour Name 				   bool update)
6220*5113495bSYour Name {
6221*5113495bSYour Name 	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6222*5113495bSYour Name 
6223*5113495bSYour Name 	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
6224*5113495bSYour Name 		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6225*5113495bSYour Name }
6226*5113495bSYour Name #endif
6227*5113495bSYour Name 
6228*5113495bSYour Name /**
6229*5113495bSYour Name  * dp_tx_prefetch_next_nbuf_data() - Prefetch nbuf and nbuf data
6230*5113495bSYour Name  * @next: descriptor of the next buffer
6231*5113495bSYour Name  *
6232*5113495bSYour Name  * Return: none
6233*5113495bSYour Name  */
6234*5113495bSYour Name #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
6235*5113495bSYour Name static inline
6236*5113495bSYour Name void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
6237*5113495bSYour Name {
6238*5113495bSYour Name 	qdf_nbuf_t nbuf = NULL;
6239*5113495bSYour Name 
6240*5113495bSYour Name 	if (next)
6241*5113495bSYour Name 		nbuf = next->nbuf;
6242*5113495bSYour Name 	if (nbuf)
6243*5113495bSYour Name 		qdf_prefetch(nbuf);
6244*5113495bSYour Name }
6245*5113495bSYour Name #else
6246*5113495bSYour Name static inline
6247*5113495bSYour Name void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
6248*5113495bSYour Name {
6249*5113495bSYour Name }
6250*5113495bSYour Name #endif
6251*5113495bSYour Name 
6252*5113495bSYour Name /**
6253*5113495bSYour Name  * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
6254*5113495bSYour Name  * @soc: core txrx main context
6255*5113495bSYour Name  * @desc: software descriptor
6256*5113495bSYour Name  *
6257*5113495bSYour Name  * Return: true when packet is reinjected
6258*5113495bSYour Name  */
6259*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
6260*5113495bSYour Name 	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
6261*5113495bSYour Name static inline bool
6262*5113495bSYour Name dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
6263*5113495bSYour Name {
6264*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
6265*5113495bSYour Name 	uint8_t xmit_type;
6266*5113495bSYour Name 
6267*5113495bSYour Name 	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
6268*5113495bSYour Name 		if (!soc->arch_ops.dp_tx_mcast_handler ||
6269*5113495bSYour Name 		    !soc->arch_ops.dp_tx_is_mcast_primary)
6270*5113495bSYour Name 			return false;
6271*5113495bSYour Name 
6272*5113495bSYour Name 		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
6273*5113495bSYour Name 					     DP_MOD_ID_REINJECT);
6274*5113495bSYour Name 
6275*5113495bSYour Name 		if (qdf_unlikely(!vdev)) {
6276*5113495bSYour Name 			dp_tx_comp_info_rl("Unable to get vdev ref %d",
6277*5113495bSYour Name 					   desc->id);
6278*5113495bSYour Name 			return false;
6279*5113495bSYour Name 		}
6280*5113495bSYour Name 
6281*5113495bSYour Name 		if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
6282*5113495bSYour Name 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
6283*5113495bSYour Name 			return false;
6284*5113495bSYour Name 		}
6285*5113495bSYour Name 		xmit_type = qdf_nbuf_get_vdev_xmit_type(desc->nbuf);
6286*5113495bSYour Name 		DP_STATS_INC_PKT(vdev, tx_i[xmit_type].reinject_pkts, 1,
6287*5113495bSYour Name 				 qdf_nbuf_len(desc->nbuf));
6288*5113495bSYour Name 		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
6289*5113495bSYour Name 		dp_tx_desc_release(soc, desc, desc->pool_id);
6290*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
6291*5113495bSYour Name 		return true;
6292*5113495bSYour Name 	}
6293*5113495bSYour Name 
6294*5113495bSYour Name 	return false;
6295*5113495bSYour Name }
6296*5113495bSYour Name #else
6297*5113495bSYour Name static inline bool
6298*5113495bSYour Name dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
6299*5113495bSYour Name {
6300*5113495bSYour Name 	return false;
6301*5113495bSYour Name }
6302*5113495bSYour Name #endif
6303*5113495bSYour Name 
6304*5113495bSYour Name #ifdef QCA_DP_TX_NBUF_LIST_FREE
6305*5113495bSYour Name static inline void
6306*5113495bSYour Name dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
6307*5113495bSYour Name {
6308*5113495bSYour Name 	qdf_nbuf_queue_head_init(nbuf_queue_head);
6309*5113495bSYour Name }
6310*5113495bSYour Name 
6311*5113495bSYour Name static inline void
6312*5113495bSYour Name dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
6313*5113495bSYour Name 			  struct dp_tx_desc_s *desc)
6314*5113495bSYour Name {
6315*5113495bSYour Name 	qdf_nbuf_t nbuf = NULL;
6316*5113495bSYour Name 
6317*5113495bSYour Name 	nbuf = desc->nbuf;
6318*5113495bSYour Name 	if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
6319*5113495bSYour Name 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
6320*5113495bSYour Name 	else
6321*5113495bSYour Name 		qdf_nbuf_free(nbuf);
6322*5113495bSYour Name }
6323*5113495bSYour Name 
6324*5113495bSYour Name static inline void
6325*5113495bSYour Name dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
6326*5113495bSYour Name 				  qdf_nbuf_t nbuf)
6327*5113495bSYour Name {
6328*5113495bSYour Name 	if (!nbuf)
6329*5113495bSYour Name 		return;
6330*5113495bSYour Name 
6331*5113495bSYour Name 	if (nbuf->is_from_recycler)
6332*5113495bSYour Name 		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
6333*5113495bSYour Name 	else
6334*5113495bSYour Name 		qdf_nbuf_free(nbuf);
6335*5113495bSYour Name }
6336*5113495bSYour Name 
6337*5113495bSYour Name static inline void
6338*5113495bSYour Name dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
6339*5113495bSYour Name {
6340*5113495bSYour Name 	qdf_nbuf_dev_kfree_list(nbuf_queue_head);
6341*5113495bSYour Name }
6342*5113495bSYour Name #else
6343*5113495bSYour Name static inline void
6344*5113495bSYour Name dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
6345*5113495bSYour Name {
6346*5113495bSYour Name }
6347*5113495bSYour Name 
6348*5113495bSYour Name static inline void
6349*5113495bSYour Name dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
6350*5113495bSYour Name 			  struct dp_tx_desc_s *desc)
6351*5113495bSYour Name {
6352*5113495bSYour Name 	qdf_nbuf_free(desc->nbuf);
6353*5113495bSYour Name }
6354*5113495bSYour Name 
6355*5113495bSYour Name static inline void
6356*5113495bSYour Name dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
6357*5113495bSYour Name 				  qdf_nbuf_t nbuf)
6358*5113495bSYour Name {
6359*5113495bSYour Name 	qdf_nbuf_free(nbuf);
6360*5113495bSYour Name }
6361*5113495bSYour Name 
6362*5113495bSYour Name static inline void
6363*5113495bSYour Name dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
6364*5113495bSYour Name {
6365*5113495bSYour Name }
6366*5113495bSYour Name #endif
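
/*
 * Usage sketch of the batching helpers above, mirroring what
 * dp_tx_comp_process_desc_list() does further below: stage recycler-owned
 * nbufs on a local queue head and release them in a single batch. Not
 * called by the driver; shown only to make the pattern explicit.
 */
static void dp_tx_batch_free_sketch(struct dp_tx_desc_s *head)
{
	qdf_nbuf_queue_head_t h;
	struct dp_tx_desc_s *desc;

	dp_tx_nbuf_queue_head_init(&h);
	for (desc = head; desc; desc = desc->next)
		dp_tx_nbuf_dev_queue_free(&h, desc);	/* stage or free */
	dp_tx_nbuf_dev_kfree_list(&h);	/* one batched release */
}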
6367*5113495bSYour Name 
6368*5113495bSYour Name #ifdef WLAN_SUPPORT_PPEDS
6369*5113495bSYour Name static inline void
6370*5113495bSYour Name dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
6371*5113495bSYour Name 				 struct dp_txrx_peer *txrx_peer,
6372*5113495bSYour Name 				 struct hal_tx_completion_status *ts,
6373*5113495bSYour Name 				 struct dp_tx_desc_s *desc,
6374*5113495bSYour Name 				 uint8_t ring_id)
6375*5113495bSYour Name {
6376*5113495bSYour Name 	uint8_t link_id = 0;
6377*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
6378*5113495bSYour Name 
6379*5113495bSYour Name 	if (qdf_likely(txrx_peer)) {
6380*5113495bSYour Name 		if (!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
6381*5113495bSYour Name 			hal_tx_comp_get_status(&desc->comp,
6382*5113495bSYour Name 					       ts,
6383*5113495bSYour Name 					       soc->hal_soc);
6384*5113495bSYour Name 			vdev = txrx_peer->vdev;
6385*5113495bSYour Name 			link_id = dp_tx_get_link_id_from_ppdu_id(soc,
6386*5113495bSYour Name 								 ts,
6387*5113495bSYour Name 								 txrx_peer,
6388*5113495bSYour Name 								 vdev);
6389*5113495bSYour Name 			if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
6390*5113495bSYour Name 				link_id = 0;
6391*5113495bSYour Name 			dp_tx_update_peer_stats(desc, ts,
6392*5113495bSYour Name 						txrx_peer,
6393*5113495bSYour Name 						ring_id,
6394*5113495bSYour Name 						link_id);
6395*5113495bSYour Name 		} else {
6396*5113495bSYour Name 			dp_tx_update_peer_basic_stats(txrx_peer, desc->length,
6397*5113495bSYour Name 						      desc->tx_status, false);
6398*5113495bSYour Name 		}
6399*5113495bSYour Name 	}
6400*5113495bSYour Name }
6401*5113495bSYour Name #else
6402*5113495bSYour Name static inline void
6403*5113495bSYour Name dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
6404*5113495bSYour Name 				 struct dp_txrx_peer *txrx_peer,
6405*5113495bSYour Name 				 struct hal_tx_completion_status *ts,
6406*5113495bSYour Name 				 struct dp_tx_desc_s *desc,
6407*5113495bSYour Name 				 uint8_t ring_id)
6408*5113495bSYour Name {
6409*5113495bSYour Name }
6410*5113495bSYour Name #endif
6411*5113495bSYour Name 
6412*5113495bSYour Name void
6413*5113495bSYour Name dp_tx_comp_process_desc_list_fast(struct dp_soc *soc,
6414*5113495bSYour Name 				  struct dp_tx_desc_s *head_desc,
6415*5113495bSYour Name 				  struct dp_tx_desc_s *tail_desc,
6416*5113495bSYour Name 				  uint8_t ring_id,
6417*5113495bSYour Name 				  uint32_t fast_desc_count)
6418*5113495bSYour Name {
6419*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = NULL;
6420*5113495bSYour Name 
6421*5113495bSYour Name 	pool = dp_get_tx_desc_pool(soc, head_desc->pool_id);
6422*5113495bSYour Name 	dp_tx_outstanding_sub(head_desc->pdev, fast_desc_count);
6423*5113495bSYour Name 	dp_tx_desc_free_list(pool, head_desc, tail_desc, fast_desc_count);
6424*5113495bSYour Name }
6425*5113495bSYour Name 
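/**
 * dp_tx_comp_process_desc_list() - process a chain of reaped Tx descriptors
 * @soc: DP soc handle
 * @comp_head: head of the software descriptor chain
 * @ring_id: Tx completion ring number
 *
 * Walks the chain while caching the txrx peer reference across consecutive
 * descriptors with the same peer_id, and handles each descriptor according
 * to its flags: multicast reinjection, PPE-DS release, the SIMPLE fast
 * path (basic stats, unmap and free), or full completion status
 * processing.
 *
 * Return: none
 */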
6426*5113495bSYour Name void
6427*5113495bSYour Name dp_tx_comp_process_desc_list(struct dp_soc *soc,
6428*5113495bSYour Name 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
6429*5113495bSYour Name {
6430*5113495bSYour Name 	struct dp_tx_desc_s *desc;
6431*5113495bSYour Name 	struct dp_tx_desc_s *next;
6432*5113495bSYour Name 	struct hal_tx_completion_status ts;
6433*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
6434*5113495bSYour Name 	uint16_t peer_id = DP_INVALID_PEER;
6435*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
6436*5113495bSYour Name 	qdf_nbuf_queue_head_t h;
6437*5113495bSYour Name 
6438*5113495bSYour Name 	desc = comp_head;
6439*5113495bSYour Name 
6440*5113495bSYour Name 	dp_tx_nbuf_queue_head_init(&h);
6441*5113495bSYour Name 
6442*5113495bSYour Name 	while (desc) {
6443*5113495bSYour Name 		next = desc->next;
6444*5113495bSYour Name 		dp_tx_prefetch_next_nbuf_data(next);
6445*5113495bSYour Name 
6446*5113495bSYour Name 		if (peer_id != desc->peer_id) {
6447*5113495bSYour Name 			if (txrx_peer)
6448*5113495bSYour Name 				dp_txrx_peer_unref_delete(txrx_ref_handle,
6449*5113495bSYour Name 							  DP_MOD_ID_TX_COMP);
6450*5113495bSYour Name 			peer_id = desc->peer_id;
6451*5113495bSYour Name 			txrx_peer =
6452*5113495bSYour Name 				dp_txrx_peer_get_ref_by_id(soc, peer_id,
6453*5113495bSYour Name 							   &txrx_ref_handle,
6454*5113495bSYour Name 							   DP_MOD_ID_TX_COMP);
6455*5113495bSYour Name 		}
6456*5113495bSYour Name 
6457*5113495bSYour Name 		if (dp_tx_mcast_reinject_handler(soc, desc)) {
6458*5113495bSYour Name 			desc = next;
6459*5113495bSYour Name 			continue;
6460*5113495bSYour Name 		}
6461*5113495bSYour Name 
6462*5113495bSYour Name 		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
6463*5113495bSYour Name 			qdf_nbuf_t nbuf;
6464*5113495bSYour Name 			dp_tx_update_ppeds_tx_comp_stats(soc, txrx_peer, &ts,
6465*5113495bSYour Name 							 desc, ring_id);
6466*5113495bSYour Name 
6467*5113495bSYour Name 			if (desc->pool_id != DP_TX_PPEDS_POOL_ID) {
6468*5113495bSYour Name 				nbuf = desc->nbuf;
6469*5113495bSYour Name 				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
6470*5113495bSYour Name 				if (desc->flags & DP_TX_DESC_FLAG_SPECIAL)
6471*5113495bSYour Name 					dp_tx_spcl_desc_free(soc, desc,
6472*5113495bSYour Name 							     desc->pool_id);
6473*5113495bSYour Name 				else
6474*5113495bSYour Name 					dp_tx_desc_free(soc, desc,
6475*5113495bSYour Name 							desc->pool_id);
6476*5113495bSYour Name 
6477*5113495bSYour Name 				__dp_tx_outstanding_dec(soc);
6478*5113495bSYour Name 			} else {
6479*5113495bSYour Name 				nbuf = dp_ppeds_tx_desc_free(soc, desc);
6480*5113495bSYour Name 				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
6481*5113495bSYour Name 			}
6482*5113495bSYour Name 			desc = next;
6483*5113495bSYour Name 			continue;
6484*5113495bSYour Name 		}
6485*5113495bSYour Name 
6486*5113495bSYour Name 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
6487*5113495bSYour Name 			struct dp_pdev *pdev = desc->pdev;
6488*5113495bSYour Name 
6489*5113495bSYour Name 			if (qdf_likely(txrx_peer))
6490*5113495bSYour Name 				dp_tx_update_peer_basic_stats(txrx_peer,
6491*5113495bSYour Name 							      desc->length,
6492*5113495bSYour Name 							      desc->tx_status,
6493*5113495bSYour Name 							      false);
6494*5113495bSYour Name 			qdf_assert(pdev);
6495*5113495bSYour Name 			dp_tx_outstanding_dec(pdev);
			/*
			 * Calling a QDF wrapper here incurs a significant
			 * performance cost, so the wrapper call is avoided
			 * on this hot path.
			 */
6500*5113495bSYour Name 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
6501*5113495bSYour Name 					       desc->id, DP_TX_COMP_UNMAP);
6502*5113495bSYour Name 			dp_tx_nbuf_unmap(soc, desc);
6503*5113495bSYour Name 			dp_tx_nbuf_dev_queue_free(&h, desc);
6504*5113495bSYour Name 			dp_tx_desc_free(soc, desc, desc->pool_id);
6505*5113495bSYour Name 			desc = next;
6506*5113495bSYour Name 			continue;
6507*5113495bSYour Name 		}
6508*5113495bSYour Name 
6509*5113495bSYour Name 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
6510*5113495bSYour Name 
6511*5113495bSYour Name 		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
6512*5113495bSYour Name 					     ring_id);
6513*5113495bSYour Name 
6514*5113495bSYour Name 		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
6515*5113495bSYour Name 
6516*5113495bSYour Name 		dp_tx_desc_release(soc, desc, desc->pool_id);
6517*5113495bSYour Name 		desc = next;
6518*5113495bSYour Name 	}
6519*5113495bSYour Name 	dp_tx_nbuf_dev_kfree_list(&h);
6520*5113495bSYour Name 	if (txrx_peer)
6521*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
6522*5113495bSYour Name }
6523*5113495bSYour Name 
6524*5113495bSYour Name #ifndef WLAN_SOFTUMAC_SUPPORT
6525*5113495bSYour Name /**
6526*5113495bSYour Name  * dp_tx_dump_tx_desc() - Dump tx desc for debugging
6527*5113495bSYour Name  * @tx_desc: software descriptor head pointer
6528*5113495bSYour Name  *
6529*5113495bSYour Name  * This function will dump tx desc for further debugging
6530*5113495bSYour Name  *
6531*5113495bSYour Name  * Return: none
6532*5113495bSYour Name  */
6533*5113495bSYour Name static
6534*5113495bSYour Name void dp_tx_dump_tx_desc(struct dp_tx_desc_s *tx_desc)
6535*5113495bSYour Name {
6536*5113495bSYour Name 	if (tx_desc) {
6537*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->nbuf: %pK", tx_desc->nbuf);
6538*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->flags: 0x%x", tx_desc->flags);
6539*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->id: %u", tx_desc->id);
6540*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->dma_addr: 0x%x",
6541*5113495bSYour Name 				(unsigned int)tx_desc->dma_addr);
6542*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->vdev_id: %u",
6543*5113495bSYour Name 				tx_desc->vdev_id);
6544*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->tx_status: %u",
6545*5113495bSYour Name 				tx_desc->tx_status);
6546*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->pdev: %pK",
6547*5113495bSYour Name 				tx_desc->pdev);
6548*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->tx_encap_type: %u",
6549*5113495bSYour Name 				tx_desc->tx_encap_type);
6550*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->buffer_src: %u",
6551*5113495bSYour Name 				tx_desc->buffer_src);
6552*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->frm_type: %u",
6553*5113495bSYour Name 				tx_desc->frm_type);
6554*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->pkt_offset: %u",
6555*5113495bSYour Name 				tx_desc->pkt_offset);
6556*5113495bSYour Name 		dp_tx_comp_warn("tx_desc->pool_id: %u",
6557*5113495bSYour Name 				tx_desc->pool_id);
6558*5113495bSYour Name 	}
6559*5113495bSYour Name }
6560*5113495bSYour Name #endif
6561*5113495bSYour Name 
6562*5113495bSYour Name #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
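/**
 * dp_tx_comp_loop_pkt_limit_hit() - check if the reap loop packet limit
 *				     has been hit
 * @soc: DP soc handle
 * @num_reaped: number of descriptors reaped so far in this loop
 * @max_reap_limit: maximum number of descriptors to reap per loop
 *
 * Return: true if the limit is hit (the corresponding soc stat is
 *	   incremented), false otherwise
 */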
6563*5113495bSYour Name static inline
6564*5113495bSYour Name bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
6565*5113495bSYour Name 				   int max_reap_limit)
6566*5113495bSYour Name {
6567*5113495bSYour Name 	bool limit_hit = false;
6568*5113495bSYour Name 
	limit_hit = (num_reaped >= max_reap_limit);
6571*5113495bSYour Name 
6572*5113495bSYour Name 	if (limit_hit)
6573*5113495bSYour Name 		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
6574*5113495bSYour Name 
6575*5113495bSYour Name 	return limit_hit;
6576*5113495bSYour Name }
6577*5113495bSYour Name 
6578*5113495bSYour Name static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
6579*5113495bSYour Name {
6580*5113495bSYour Name 	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
6581*5113495bSYour Name }
6582*5113495bSYour Name 
6583*5113495bSYour Name static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
6584*5113495bSYour Name {
6585*5113495bSYour Name 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
6586*5113495bSYour Name 
6587*5113495bSYour Name 	return cfg->tx_comp_loop_pkt_limit;
6588*5113495bSYour Name }
6589*5113495bSYour Name #else
6590*5113495bSYour Name static inline
6591*5113495bSYour Name bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
6592*5113495bSYour Name 				   int max_reap_limit)
6593*5113495bSYour Name {
6594*5113495bSYour Name 	return false;
6595*5113495bSYour Name }
6596*5113495bSYour Name 
6597*5113495bSYour Name static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
6598*5113495bSYour Name {
6599*5113495bSYour Name 	return false;
6600*5113495bSYour Name }
6601*5113495bSYour Name 
6602*5113495bSYour Name static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
6603*5113495bSYour Name {
6604*5113495bSYour Name 	return 0;
6605*5113495bSYour Name }
6606*5113495bSYour Name #endif
6607*5113495bSYour Name 
6608*5113495bSYour Name #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
6609*5113495bSYour Name static inline int
6610*5113495bSYour Name dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
6611*5113495bSYour Name 				  int *max_reap_limit)
6612*5113495bSYour Name {
6613*5113495bSYour Name 	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
6614*5113495bSYour Name 							       max_reap_limit);
6615*5113495bSYour Name }
6616*5113495bSYour Name #else
6617*5113495bSYour Name static inline int
6618*5113495bSYour Name dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
6619*5113495bSYour Name 				  int *max_reap_limit)
6620*5113495bSYour Name {
6621*5113495bSYour Name 	return 0;
6622*5113495bSYour Name }
6623*5113495bSYour Name #endif
6624*5113495bSYour Name 
6625*5113495bSYour Name #ifdef DP_TX_TRACKING
6626*5113495bSYour Name void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
6627*5113495bSYour Name {
6628*5113495bSYour Name 	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
6629*5113495bSYour Name 	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
6630*5113495bSYour Name 		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
6631*5113495bSYour Name 		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
6632*5113495bSYour Name 	}
6633*5113495bSYour Name }
6634*5113495bSYour Name #endif
6635*5113495bSYour Name 
6636*5113495bSYour Name #ifndef WLAN_SOFTUMAC_SUPPORT
6637*5113495bSYour Name #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
6638*5113495bSYour Name 
/* Increasing this value runs the risk of SRNG backpressure */
6640*5113495bSYour Name #define DP_STALE_TX_COMP_WAIT_TIMEOUT_US 1000
6641*5113495bSYour Name 
6642*5113495bSYour Name static inline void
6643*5113495bSYour Name dp_tx_comp_reset_stale_entry_detection(struct dp_soc *soc, uint32_t ring_num)
6644*5113495bSYour Name {
6645*5113495bSYour Name 	soc->stale_entry[ring_num].detected = 0;
6646*5113495bSYour Name }
6647*5113495bSYour Name 
6648*5113495bSYour Name /**
6649*5113495bSYour Name  * dp_tx_comp_stale_entry_handle() - Detect stale entry condition in tx
6650*5113495bSYour Name  *				     completion srng.
6651*5113495bSYour Name  * @soc: DP SoC handle
6652*5113495bSYour Name  * @ring_num: tx completion ring number
6653*5113495bSYour Name  * @status: QDF_STATUS from tx_comp_get_params_from_hal_desc arch ops
6654*5113495bSYour Name  *
 * Return: QDF_STATUS_SUCCESS if a stale entry is detected and handled,
 *	   a QDF_STATUS error code otherwise.
6657*5113495bSYour Name  */
6658*5113495bSYour Name static inline QDF_STATUS
6659*5113495bSYour Name dp_tx_comp_stale_entry_handle(struct dp_soc *soc, uint32_t ring_num,
6660*5113495bSYour Name 			      QDF_STATUS status)
6661*5113495bSYour Name {
6662*5113495bSYour Name 	uint64_t curr_timestamp = qdf_get_log_timestamp_usecs();
6663*5113495bSYour Name 	uint64_t delta_us;
6664*5113495bSYour Name 
6665*5113495bSYour Name 	if (status != QDF_STATUS_E_PENDING) {
6666*5113495bSYour Name 		dp_tx_comp_reset_stale_entry_detection(soc, ring_num);
6667*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
6668*5113495bSYour Name 	}
6669*5113495bSYour Name 
6670*5113495bSYour Name 	if (soc->stale_entry[ring_num].detected) {
6671*5113495bSYour Name 		/* stale entry process continuation */
6672*5113495bSYour Name 		delta_us = curr_timestamp -
6673*5113495bSYour Name 				soc->stale_entry[ring_num].start_time;
6674*5113495bSYour Name 		if (delta_us > DP_STALE_TX_COMP_WAIT_TIMEOUT_US) {
			dp_err("Stale tx comp desc, waited %llu us",
			       (unsigned long long)delta_us);
6676*5113495bSYour Name 			return QDF_STATUS_E_FAILURE;
6677*5113495bSYour Name 		}
6678*5113495bSYour Name 	} else {
6679*5113495bSYour Name 		/* This is the start of stale entry detection */
6680*5113495bSYour Name 		soc->stale_entry[ring_num].detected = 1;
6681*5113495bSYour Name 		soc->stale_entry[ring_num].start_time = curr_timestamp;
6682*5113495bSYour Name 	}
6683*5113495bSYour Name 
6684*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
6685*5113495bSYour Name }
6686*5113495bSYour Name #else
6687*5113495bSYour Name 
6688*5113495bSYour Name static inline void
6689*5113495bSYour Name dp_tx_comp_reset_stale_entry_detection(struct dp_soc *soc, uint32_t ring_num)
6690*5113495bSYour Name {
6691*5113495bSYour Name }
6692*5113495bSYour Name 
6693*5113495bSYour Name static inline QDF_STATUS
6694*5113495bSYour Name dp_tx_comp_stale_entry_handle(struct dp_soc *soc, uint32_t ring_num,
6695*5113495bSYour Name 			      QDF_STATUS status)
6696*5113495bSYour Name {
6697*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
6698*5113495bSYour Name }
6699*5113495bSYour Name #endif
6700*5113495bSYour Name 
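/**
 * dp_tx_comp_handler() - Tx completion ring reap handler
 * @int_ctx: interrupt context for this reap cycle
 * @soc: DP soc handle
 * @hal_ring_hdl: HAL handle of the Tx completion ring to service
 * @ring_id: Tx completion ring number
 * @quota: maximum amount of work allowed in this call
 *
 * Reaps completion descriptors from the ring, validates the buffer
 * release source, hands FW-released descriptors to HTT completion
 * processing, and chains the remaining descriptors into a fast-path
 * list (FASTPATH_SIMPLE/PPEDS) and a regular list that are processed
 * in bulk once ring access ends. Loops again while the ring is in a
 * near-full condition or more entries arrive within the quota.
 *
 * Return: amount of work done, counted against @quota
 */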
6701*5113495bSYour Name uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
6702*5113495bSYour Name 			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
6703*5113495bSYour Name 			    uint32_t quota)
6704*5113495bSYour Name {
6705*5113495bSYour Name 	void *tx_comp_hal_desc;
6706*5113495bSYour Name 	void *last_prefetched_hw_desc = NULL;
6707*5113495bSYour Name 	void *last_hw_desc = NULL;
6708*5113495bSYour Name 	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
6709*5113495bSYour Name 	hal_soc_handle_t hal_soc;
6710*5113495bSYour Name 	uint8_t buffer_src;
6711*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = NULL;
6712*5113495bSYour Name 	struct dp_tx_desc_s *head_desc = NULL;
6713*5113495bSYour Name 	struct dp_tx_desc_s *tail_desc = NULL;
6714*5113495bSYour Name 	struct dp_tx_desc_s *fast_head_desc = NULL;
6715*5113495bSYour Name 	struct dp_tx_desc_s *fast_tail_desc = NULL;
6716*5113495bSYour Name 	uint32_t num_processed = 0;
6717*5113495bSYour Name 	uint32_t fast_desc_count = 0;
6718*5113495bSYour Name 	uint32_t count;
6719*5113495bSYour Name 	uint32_t num_avail_for_reap = 0;
6720*5113495bSYour Name 	bool force_break = false;
6721*5113495bSYour Name 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
6722*5113495bSYour Name 	int max_reap_limit, ring_near_full;
6723*5113495bSYour Name 	uint32_t num_entries;
6724*5113495bSYour Name 	qdf_nbuf_queue_head_t h;
6725*5113495bSYour Name 	QDF_STATUS status;
6726*5113495bSYour Name 
6727*5113495bSYour Name 	DP_HIST_INIT();
6728*5113495bSYour Name 
6729*5113495bSYour Name 	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);
6730*5113495bSYour Name 
6731*5113495bSYour Name more_data:
6732*5113495bSYour Name 
6733*5113495bSYour Name 	hal_soc = soc->hal_soc;
6734*5113495bSYour Name 	/* Re-initialize local variables to be re-used */
6735*5113495bSYour Name 	head_desc = NULL;
6736*5113495bSYour Name 	tail_desc = NULL;
6737*5113495bSYour Name 	count = 0;
6738*5113495bSYour Name 	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
6739*5113495bSYour Name 
6740*5113495bSYour Name 	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
6741*5113495bSYour Name 							   &max_reap_limit);
6742*5113495bSYour Name 
6743*5113495bSYour Name 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
6744*5113495bSYour Name 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
6745*5113495bSYour Name 		return 0;
6746*5113495bSYour Name 	}
6747*5113495bSYour Name 
6748*5113495bSYour Name 	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);
6749*5113495bSYour Name 
6750*5113495bSYour Name 	if (!num_avail_for_reap)
6751*5113495bSYour Name 		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
6752*5113495bSYour Name 							    hal_ring_hdl, 0);
6753*5113495bSYour Name 
6754*5113495bSYour Name 	if (num_avail_for_reap >= quota)
6755*5113495bSYour Name 		num_avail_for_reap = quota;
6756*5113495bSYour Name 
6757*5113495bSYour Name 	last_hw_desc = dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl,
6758*5113495bSYour Name 						    num_avail_for_reap);
6759*5113495bSYour Name 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
6760*5113495bSYour Name 							    hal_ring_hdl,
6761*5113495bSYour Name 							    num_avail_for_reap);
6762*5113495bSYour Name 
6763*5113495bSYour Name 	dp_tx_nbuf_queue_head_init(&h);
6764*5113495bSYour Name 
6765*5113495bSYour Name 	/* Find head descriptor from completion ring */
6766*5113495bSYour Name 	while (qdf_likely(num_avail_for_reap--)) {
6767*5113495bSYour Name 
6768*5113495bSYour Name 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
6769*5113495bSYour Name 		if (qdf_unlikely(!tx_comp_hal_desc))
6770*5113495bSYour Name 			break;
6771*5113495bSYour Name 		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
6772*5113495bSYour Name 							   tx_comp_hal_desc);
6773*5113495bSYour Name 
		/*
		 * If this buffer was not released by TQM or FW, it is not a
		 * Tx completion indication; log the error and skip the entry.
		 */
6776*5113495bSYour Name 		if (qdf_unlikely(buffer_src !=
6777*5113495bSYour Name 					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
6778*5113495bSYour Name 				 (qdf_unlikely(buffer_src !=
6779*5113495bSYour Name 					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
6780*5113495bSYour Name 			uint8_t wbm_internal_error;
6781*5113495bSYour Name 
6782*5113495bSYour Name 			dp_err_rl(
6783*5113495bSYour Name 				"Tx comp release_src != TQM | FW but from %d",
6784*5113495bSYour Name 				buffer_src);
6785*5113495bSYour Name 			hal_dump_comp_desc(tx_comp_hal_desc);
6786*5113495bSYour Name 			DP_STATS_INC(soc, tx.invalid_release_source, 1);
6787*5113495bSYour Name 
6788*5113495bSYour Name 			/* When WBM sees NULL buffer_addr_info in any of
6789*5113495bSYour Name 			 * ingress rings it sends an error indication,
6790*5113495bSYour Name 			 * with wbm_internal_error=1, to a specific ring.
6791*5113495bSYour Name 			 * The WBM2SW ring used to indicate these errors is
6792*5113495bSYour Name 			 * fixed in HW, and that ring is being used as Tx
6793*5113495bSYour Name 			 * completion ring. These errors are not related to
6794*5113495bSYour Name 			 * Tx completions, and should just be ignored
6795*5113495bSYour Name 			 */
6796*5113495bSYour Name 			wbm_internal_error = hal_get_wbm_internal_error(
6797*5113495bSYour Name 							hal_soc,
6798*5113495bSYour Name 							tx_comp_hal_desc);
6799*5113495bSYour Name 
6800*5113495bSYour Name 			if (wbm_internal_error) {
6801*5113495bSYour Name 				dp_err_rl("Tx comp wbm_internal_error!!");
6802*5113495bSYour Name 				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
6803*5113495bSYour Name 
6804*5113495bSYour Name 				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
6805*5113495bSYour Name 								buffer_src)
6806*5113495bSYour Name 					dp_handle_wbm_internal_error(
6807*5113495bSYour Name 						soc,
6808*5113495bSYour Name 						tx_comp_hal_desc,
6809*5113495bSYour Name 						hal_tx_comp_get_buffer_type(
6810*5113495bSYour Name 							tx_comp_hal_desc));
6811*5113495bSYour Name 
6812*5113495bSYour Name 			} else {
6813*5113495bSYour Name 				dp_err_rl("Tx comp wbm_internal_error false");
6814*5113495bSYour Name 				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
6815*5113495bSYour Name 			}
6816*5113495bSYour Name 			continue;
6817*5113495bSYour Name 		}
6818*5113495bSYour Name 
6819*5113495bSYour Name 		status = soc->arch_ops.tx_comp_get_params_from_hal_desc(
6820*5113495bSYour Name 							soc, tx_comp_hal_desc,
6821*5113495bSYour Name 							&tx_desc);
6822*5113495bSYour Name 		if (qdf_unlikely(!tx_desc)) {
6823*5113495bSYour Name 			if (QDF_IS_STATUS_SUCCESS(
6824*5113495bSYour Name 				dp_tx_comp_stale_entry_handle(soc, ring_id,
6825*5113495bSYour Name 							      status))) {
6826*5113495bSYour Name 				hal_srng_dst_dec_tp(hal_soc, hal_ring_hdl);
6827*5113495bSYour Name 				break;
6828*5113495bSYour Name 			}
6829*5113495bSYour Name 
6830*5113495bSYour Name 			dp_err("unable to retrieve tx_desc!");
6831*5113495bSYour Name 			hal_dump_comp_desc(tx_comp_hal_desc);
6832*5113495bSYour Name 			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
6833*5113495bSYour Name 			QDF_BUG(0);
6834*5113495bSYour Name 			continue;
6835*5113495bSYour Name 		}
6836*5113495bSYour Name 
6837*5113495bSYour Name 		dp_tx_comp_reset_stale_entry_detection(soc, ring_id);
6838*5113495bSYour Name 		tx_desc->buffer_src = buffer_src;
6839*5113495bSYour Name 
6840*5113495bSYour Name 		/*
6841*5113495bSYour Name 		 * If the release source is FW, process the HTT status
6842*5113495bSYour Name 		 */
6843*5113495bSYour Name 		if (qdf_unlikely(buffer_src ==
6844*5113495bSYour Name 					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
6845*5113495bSYour Name 			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
6846*5113495bSYour Name 
6847*5113495bSYour Name 			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
6848*5113495bSYour Name 					htt_tx_status);
6849*5113495bSYour Name 			/* Collect hw completion contents */
6850*5113495bSYour Name 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
6851*5113495bSYour Name 					      &tx_desc->comp, 1);
6852*5113495bSYour Name 			soc->arch_ops.dp_tx_process_htt_completion(
6853*5113495bSYour Name 							soc,
6854*5113495bSYour Name 							tx_desc,
6855*5113495bSYour Name 							htt_tx_status,
6856*5113495bSYour Name 							ring_id);
6857*5113495bSYour Name 			if (qdf_unlikely(!tx_desc->pdev)) {
6858*5113495bSYour Name 				dp_tx_dump_tx_desc(tx_desc);
6859*5113495bSYour Name 			}
6860*5113495bSYour Name 		} else {
6861*5113495bSYour Name 			if (tx_desc->flags & DP_TX_DESC_FLAG_FASTPATH_SIMPLE ||
6862*5113495bSYour Name 			    tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
6863*5113495bSYour Name 				goto add_to_pool2;
6864*5113495bSYour Name 
6865*5113495bSYour Name 			tx_desc->tx_status =
6866*5113495bSYour Name 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
6867*5113495bSYour Name 			tx_desc->buffer_src = buffer_src;
			/*
			 * If fast completion mode is enabled, the extended
			 * metadata from the descriptor is not copied
			 */
6872*5113495bSYour Name 			if (qdf_likely(tx_desc->flags &
6873*5113495bSYour Name 						DP_TX_DESC_FLAG_SIMPLE))
6874*5113495bSYour Name 				goto add_to_pool;
6875*5113495bSYour Name 
6876*5113495bSYour Name 			/*
6877*5113495bSYour Name 			 * If the descriptor is already freed in vdev_detach,
6878*5113495bSYour Name 			 * continue to next descriptor
6879*5113495bSYour Name 			 */
6880*5113495bSYour Name 			if (qdf_unlikely
6881*5113495bSYour Name 				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
6882*5113495bSYour Name 				 !tx_desc->flags)) {
6883*5113495bSYour Name 				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
6884*5113495bSYour Name 						   tx_desc->id);
6885*5113495bSYour Name 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
6886*5113495bSYour Name 				dp_tx_desc_check_corruption(tx_desc);
6887*5113495bSYour Name 				continue;
6888*5113495bSYour Name 			}
6889*5113495bSYour Name 
6890*5113495bSYour Name 			if (qdf_unlikely(!tx_desc->pdev)) {
6891*5113495bSYour Name 				dp_tx_comp_warn("The pdev is NULL in TX desc, ignored.");
6892*5113495bSYour Name 				dp_tx_dump_tx_desc(tx_desc);
6893*5113495bSYour Name 				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
6894*5113495bSYour Name 				continue;
6895*5113495bSYour Name 			}
6896*5113495bSYour Name 
6897*5113495bSYour Name 			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
6898*5113495bSYour Name 				dp_tx_comp_info_rl("pdev in down state %d",
6899*5113495bSYour Name 						   tx_desc->id);
6900*5113495bSYour Name 				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
6901*5113495bSYour Name 				dp_tx_comp_free_buf(soc, tx_desc, false);
6902*5113495bSYour Name 				dp_tx_desc_release(soc, tx_desc,
6903*5113495bSYour Name 						   tx_desc->pool_id);
6904*5113495bSYour Name 				goto next_desc;
6905*5113495bSYour Name 			}
6906*5113495bSYour Name 
6907*5113495bSYour Name 			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
6908*5113495bSYour Name 				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				dp_tx_comp_alert("Txdesc invalid, flags = %x, id = %d",
						 tx_desc->flags, tx_desc->id);
6911*5113495bSYour Name 				qdf_assert_always(0);
6912*5113495bSYour Name 			}
6913*5113495bSYour Name 
6914*5113495bSYour Name 			/* Collect hw completion contents */
6915*5113495bSYour Name 			hal_tx_comp_desc_sync(tx_comp_hal_desc,
6916*5113495bSYour Name 					      &tx_desc->comp, 1);
6917*5113495bSYour Name add_to_pool:
6918*5113495bSYour Name 			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
6919*5113495bSYour Name 
6920*5113495bSYour Name add_to_pool2:
6921*5113495bSYour Name 			/* First ring descriptor on the cycle */
6922*5113495bSYour Name 
6923*5113495bSYour Name 			if (tx_desc->flags & DP_TX_DESC_FLAG_FASTPATH_SIMPLE ||
6924*5113495bSYour Name 			    tx_desc->flags & DP_TX_DESC_FLAG_PPEDS) {
6925*5113495bSYour Name 				dp_tx_nbuf_dev_queue_free(&h, tx_desc);
6926*5113495bSYour Name 				fast_desc_count++;
6927*5113495bSYour Name 				if (!fast_head_desc) {
6928*5113495bSYour Name 					fast_head_desc = tx_desc;
6929*5113495bSYour Name 					fast_tail_desc = tx_desc;
6930*5113495bSYour Name 				}
6931*5113495bSYour Name 				fast_tail_desc->next = tx_desc;
6932*5113495bSYour Name 				fast_tail_desc = tx_desc;
6933*5113495bSYour Name 				dp_tx_desc_clear(tx_desc);
6934*5113495bSYour Name 			} else {
6935*5113495bSYour Name 				if (!head_desc) {
6936*5113495bSYour Name 					head_desc = tx_desc;
6937*5113495bSYour Name 					tail_desc = tx_desc;
6938*5113495bSYour Name 				}
6939*5113495bSYour Name 
6940*5113495bSYour Name 				tail_desc->next = tx_desc;
6941*5113495bSYour Name 				tx_desc->next = NULL;
6942*5113495bSYour Name 				tail_desc = tx_desc;
6943*5113495bSYour Name 			}
6944*5113495bSYour Name 		}
6945*5113495bSYour Name next_desc:
6946*5113495bSYour Name 		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
6947*5113495bSYour Name 
		/*
		 * Stop processing once the number of reaped descriptors
		 * exceeds the per-loop packet limit
		 */
6952*5113495bSYour Name 
6953*5113495bSYour Name 		count++;
6954*5113495bSYour Name 
6955*5113495bSYour Name 		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
6956*5113495bSYour Name 					       num_avail_for_reap,
6957*5113495bSYour Name 					       hal_ring_hdl,
6958*5113495bSYour Name 					       &last_prefetched_hw_desc,
6959*5113495bSYour Name 					       &last_prefetched_sw_desc,
6960*5113495bSYour Name 					       last_hw_desc);
6961*5113495bSYour Name 
6962*5113495bSYour Name 		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
6963*5113495bSYour Name 			break;
6964*5113495bSYour Name 	}
6965*5113495bSYour Name 
6966*5113495bSYour Name 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
6967*5113495bSYour Name 
6968*5113495bSYour Name 	/* Process the reaped descriptors that were sent via fast path */
6969*5113495bSYour Name 	if (fast_head_desc) {
6970*5113495bSYour Name 		dp_tx_comp_process_desc_list_fast(soc, fast_head_desc,
6971*5113495bSYour Name 						  fast_tail_desc, ring_id,
6972*5113495bSYour Name 						  fast_desc_count);
6973*5113495bSYour Name 		dp_tx_nbuf_dev_kfree_list(&h);
6974*5113495bSYour Name 	}
6975*5113495bSYour Name 
6976*5113495bSYour Name 	/* Process the reaped descriptors */
6977*5113495bSYour Name 	if (head_desc)
6978*5113495bSYour Name 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
6979*5113495bSYour Name 
6980*5113495bSYour Name 	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
6981*5113495bSYour Name 
	/*
	 * If we are processing in near-full condition, there are three
	 * scenarios:
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near the high threshold
	 * 3) Ring entries have dropped below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
6990*5113495bSYour Name 	if (ring_near_full)
6991*5113495bSYour Name 		goto more_data;
6992*5113495bSYour Name 
6993*5113495bSYour Name 	if (dp_tx_comp_enable_eol_data_check(soc)) {
6994*5113495bSYour Name 
6995*5113495bSYour Name 		if (num_processed >= quota)
6996*5113495bSYour Name 			force_break = true;
6997*5113495bSYour Name 
6998*5113495bSYour Name 		if (!force_break &&
6999*5113495bSYour Name 		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
7000*5113495bSYour Name 						  hal_ring_hdl)) {
7001*5113495bSYour Name 			DP_STATS_INC(soc, tx.hp_oos2, 1);
7002*5113495bSYour Name 			if (!hif_exec_should_yield(soc->hif_handle,
7003*5113495bSYour Name 						   int_ctx->dp_intr_id))
7004*5113495bSYour Name 				goto more_data;
7005*5113495bSYour Name 
7006*5113495bSYour Name 			num_avail_for_reap =
7007*5113495bSYour Name 				hal_srng_dst_num_valid_locked(soc->hal_soc,
7008*5113495bSYour Name 							      hal_ring_hdl,
7009*5113495bSYour Name 							      true);
7010*5113495bSYour Name 			if (qdf_unlikely(num_entries &&
7011*5113495bSYour Name 					 (num_avail_for_reap >=
7012*5113495bSYour Name 					  num_entries >> 1))) {
7013*5113495bSYour Name 				DP_STATS_INC(soc, tx.near_full, 1);
7014*5113495bSYour Name 				goto more_data;
7015*5113495bSYour Name 			}
7016*5113495bSYour Name 		}
7017*5113495bSYour Name 	}
7018*5113495bSYour Name 	DP_TX_HIST_STATS_PER_PDEV();
7019*5113495bSYour Name 
7020*5113495bSYour Name 	return num_processed;
7021*5113495bSYour Name }
7022*5113495bSYour Name #endif
7023*5113495bSYour Name 
7024*5113495bSYour Name #ifdef FEATURE_WLAN_TDLS
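/**
 * dp_tx_non_std() - transmit an MSDU list with non-standard Tx handling
 * @soc_hdl: DP soc handle
 * @vdev_id: id of the vdev to transmit on
 * @tx_spec: what non-standard handling to apply to the tx frames
 * @msdu_list: chain of MSDUs to transmit
 *
 * Marks the vdev as carrying TDLS frames when OL_TX_SPEC_NO_FREE is set
 * and then hands the list to the regular dp_tx_send() path.
 *
 * Return: NULL on success, otherwise the MSDU list that could not be
 *	   consumed (as returned by dp_tx_send())
 */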
7025*5113495bSYour Name qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7026*5113495bSYour Name 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
7027*5113495bSYour Name {
7028*5113495bSYour Name 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7029*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7030*5113495bSYour Name 						     DP_MOD_ID_TDLS);
7031*5113495bSYour Name 
7032*5113495bSYour Name 	if (!vdev) {
7033*5113495bSYour Name 		dp_err("vdev handle for id %d is NULL", vdev_id);
7034*5113495bSYour Name 		return NULL;
7035*5113495bSYour Name 	}
7036*5113495bSYour Name 
7037*5113495bSYour Name 	if (tx_spec & OL_TX_SPEC_NO_FREE)
7038*5113495bSYour Name 		vdev->is_tdls_frame = true;
7039*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
7040*5113495bSYour Name 
7041*5113495bSYour Name 	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
7042*5113495bSYour Name }
7043*5113495bSYour Name #endif
7044*5113495bSYour Name 
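/**
 * dp_tx_vdev_attach() - set up per-vdev Tx state
 * @vdev: DP vdev handle
 *
 * Programs the HTT TCL metadata (vdev-based type, vdev id and target
 * pdev id), clears the HTT extension valid bit and derives the HW
 * address search flags for the vdev.
 *
 * Return: QDF_STATUS_SUCCESS
 */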
7045*5113495bSYour Name QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
7046*5113495bSYour Name {
7047*5113495bSYour Name 	int pdev_id;
7048*5113495bSYour Name 	/*
7049*5113495bSYour Name 	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
7050*5113495bSYour Name 	 */
7051*5113495bSYour Name 	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
7052*5113495bSYour Name 				    DP_TCL_METADATA_TYPE_VDEV_BASED);
7053*5113495bSYour Name 
7054*5113495bSYour Name 	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
7055*5113495bSYour Name 				       vdev->vdev_id);
7056*5113495bSYour Name 
7057*5113495bSYour Name 	pdev_id =
7058*5113495bSYour Name 		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
7059*5113495bSYour Name 						       vdev->pdev->pdev_id);
7060*5113495bSYour Name 	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
7061*5113495bSYour Name 
7062*5113495bSYour Name 	/*
7063*5113495bSYour Name 	 * Set HTT Extension Valid bit to 0 by default
7064*5113495bSYour Name 	 */
7065*5113495bSYour Name 	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
7066*5113495bSYour Name 
7067*5113495bSYour Name 	dp_tx_vdev_update_search_flags(vdev);
7068*5113495bSYour Name 
7069*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7070*5113495bSYour Name }
7071*5113495bSYour Name 
7072*5113495bSYour Name #ifndef FEATURE_WDS
7073*5113495bSYour Name static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
7074*5113495bSYour Name {
7075*5113495bSYour Name 	return false;
7076*5113495bSYour Name }
7077*5113495bSYour Name #endif
7078*5113495bSYour Name 
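/**
 * dp_tx_vdev_update_search_flags() - derive HW address search settings
 * @vdev: DP vdev handle
 *
 * Selects the AddrX/AddrY search flags and the search type from the vdev
 * operating mode and TDLS link state; the inline comment below spells out
 * the per-mode rules.
 *
 * Return: none
 */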
7079*5113495bSYour Name void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
7080*5113495bSYour Name {
7081*5113495bSYour Name 	struct dp_soc *soc = vdev->pdev->soc;
7082*5113495bSYour Name 
	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for TDLS links.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
7093*5113495bSYour Name 	if (vdev->opmode == wlan_op_mode_sta &&
7094*5113495bSYour Name 	    vdev->tdls_link_connected)
7095*5113495bSYour Name 		vdev->hal_desc_addr_search_flags =
7096*5113495bSYour Name 			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
7097*5113495bSYour Name 	else if ((vdev->opmode == wlan_op_mode_sta) &&
7098*5113495bSYour Name 		 !dp_tx_da_search_override(vdev))
7099*5113495bSYour Name 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
7100*5113495bSYour Name 	else
7101*5113495bSYour Name 		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
7102*5113495bSYour Name 
7103*5113495bSYour Name 	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
7104*5113495bSYour Name 		vdev->search_type = soc->sta_mode_search_policy;
7105*5113495bSYour Name 	else
7106*5113495bSYour Name 		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
7107*5113495bSYour Name }
7108*5113495bSYour Name 
7109*5113495bSYour Name #ifdef WLAN_SUPPORT_PPEDS
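/**
 * dp_is_tx_desc_flush_match() - check whether a Tx descriptor matches the
 *				 flush target
 * @pdev: DP pdev handle
 * @vdev: DP vdev handle, may be NULL
 * @tx_desc: software Tx descriptor to check
 *
 * Only allocated descriptors can match. PPE-DS descriptors always match
 * in this variant; otherwise the descriptor matches on vdev_id when @vdev
 * is given, or on pdev when @vdev is NULL.
 *
 * Return: true on a match, false otherwise
 */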
7110*5113495bSYour Name static inline bool
7111*5113495bSYour Name dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
7112*5113495bSYour Name 			  struct dp_vdev *vdev,
7113*5113495bSYour Name 			  struct dp_tx_desc_s *tx_desc)
7114*5113495bSYour Name {
7115*5113495bSYour Name 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
7116*5113495bSYour Name 		return false;
7117*5113495bSYour Name 
7118*5113495bSYour Name 	if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
7119*5113495bSYour Name 		return true;
	/*
	 * If a vdev is given, check only whether the descriptor's vdev
	 * matches; if vdev is NULL, check whether the descriptor's pdev
	 * matches.
	 */
7125*5113495bSYour Name 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
7126*5113495bSYour Name 		(tx_desc->pdev == pdev);
7127*5113495bSYour Name }
7128*5113495bSYour Name #else
7129*5113495bSYour Name static inline bool
7130*5113495bSYour Name dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
7131*5113495bSYour Name 			  struct dp_vdev *vdev,
7132*5113495bSYour Name 			  struct dp_tx_desc_s *tx_desc)
7133*5113495bSYour Name {
7134*5113495bSYour Name 	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
7135*5113495bSYour Name 		return false;
7136*5113495bSYour Name 
	/*
	 * If a vdev is given, check only whether the descriptor's vdev
	 * matches; if vdev is NULL, check whether the descriptor's pdev
	 * matches.
	 */
7142*5113495bSYour Name 	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
7143*5113495bSYour Name 		(tx_desc->pdev == pdev);
7144*5113495bSYour Name }
7145*5113495bSYour Name #endif
7146*5113495bSYour Name 
7147*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
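/**
 * dp_tx_desc_flush() - release or reset Tx descriptors of a pdev/vdev
 * @pdev: DP pdev handle
 * @vdev: DP vdev handle, NULL means match on pdev
 * @force_free: free matching descriptors when true, otherwise only reset
 *		their vdev id
 *
 * Return: none
 */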
7148*5113495bSYour Name void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
7149*5113495bSYour Name 		      bool force_free)
7150*5113495bSYour Name {
7151*5113495bSYour Name 	uint8_t i;
7152*5113495bSYour Name 	uint32_t j;
7153*5113495bSYour Name 	uint32_t num_desc, page_id, offset;
7154*5113495bSYour Name 	uint16_t num_desc_per_page;
7155*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
7156*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = NULL;
7157*5113495bSYour Name 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
7158*5113495bSYour Name 
7159*5113495bSYour Name 	if (!vdev && !force_free) {
7160*5113495bSYour Name 		dp_err("Reset TX desc vdev, Vdev param is required!");
7161*5113495bSYour Name 		return;
7162*5113495bSYour Name 	}
7163*5113495bSYour Name 
7164*5113495bSYour Name 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
7165*5113495bSYour Name 		tx_desc_pool = &soc->tx_desc[i];
7166*5113495bSYour Name 		if (!(tx_desc_pool->pool_size) ||
7167*5113495bSYour Name 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
7168*5113495bSYour Name 		    !(tx_desc_pool->desc_pages.cacheable_pages))
7169*5113495bSYour Name 			continue;
7170*5113495bSYour Name 
		/*
		 * Take the flow pool lock to protect against the pool being
		 * freed when all tx_desc entries are recycled during TX
		 * completion handling. The lock is not needed for a force
		 * flush because:
		 * a. a double lock would occur if dp_tx_desc_release is
		 *    also trying to acquire it.
		 * b. dp interrupts have been disabled before the force TX
		 *    desc flush in dp_pdev_deinit().
		 */
7180*5113495bSYour Name 		if (!force_free)
7181*5113495bSYour Name 			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
7182*5113495bSYour Name 		num_desc = tx_desc_pool->pool_size;
7183*5113495bSYour Name 		num_desc_per_page =
7184*5113495bSYour Name 			tx_desc_pool->desc_pages.num_element_per_page;
7185*5113495bSYour Name 		for (j = 0; j < num_desc; j++) {
7186*5113495bSYour Name 			page_id = j / num_desc_per_page;
7187*5113495bSYour Name 			offset = j % num_desc_per_page;
7188*5113495bSYour Name 
7189*5113495bSYour Name 			if (qdf_unlikely(!(tx_desc_pool->
7190*5113495bSYour Name 					 desc_pages.cacheable_pages)))
7191*5113495bSYour Name 				break;
7192*5113495bSYour Name 
7193*5113495bSYour Name 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset,
7194*5113495bSYour Name 						  false);
7195*5113495bSYour Name 
7196*5113495bSYour Name 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
7197*5113495bSYour Name 				/*
7198*5113495bSYour Name 				 * Free TX desc if force free is
7199*5113495bSYour Name 				 * required, otherwise only reset vdev
7200*5113495bSYour Name 				 * in this TX desc.
7201*5113495bSYour Name 				 */
7202*5113495bSYour Name 				if (force_free) {
7203*5113495bSYour Name 					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
7204*5113495bSYour Name 					dp_tx_comp_free_buf(soc, tx_desc,
7205*5113495bSYour Name 							    false);
7206*5113495bSYour Name 					dp_tx_desc_release(soc, tx_desc, i);
7207*5113495bSYour Name 				} else {
7208*5113495bSYour Name 					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
7209*5113495bSYour Name 				}
7210*5113495bSYour Name 			}
7211*5113495bSYour Name 		}
7212*5113495bSYour Name 		if (!force_free)
7213*5113495bSYour Name 			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
7214*5113495bSYour Name 	}
7215*5113495bSYour Name }
7216*5113495bSYour Name #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset the vdev id of a TX descriptor to invalid
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer to one TX desc
 * @desc_pool_id: TX desc pool id
 * @spcl_pool: true for the special pool, false for the regular pool
 */
7225*5113495bSYour Name static inline void
7226*5113495bSYour Name dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
7227*5113495bSYour Name 		      uint8_t desc_pool_id, bool spcl_pool)
7228*5113495bSYour Name {
7229*5113495bSYour Name 	struct dp_tx_desc_pool_s *pool = NULL;
7230*5113495bSYour Name 
7231*5113495bSYour Name 	pool = spcl_pool ? dp_get_spcl_tx_desc_pool(soc, desc_pool_id) :
7232*5113495bSYour Name 				dp_get_tx_desc_pool(soc, desc_pool_id);
7233*5113495bSYour Name 	TX_DESC_LOCK_LOCK(&pool->lock);
7234*5113495bSYour Name 
7235*5113495bSYour Name 	tx_desc->vdev_id = DP_INVALID_VDEV_ID;
7236*5113495bSYour Name 
7237*5113495bSYour Name 	TX_DESC_LOCK_UNLOCK(&pool->lock);
7238*5113495bSYour Name }
7239*5113495bSYour Name 
7240*5113495bSYour Name void __dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
7241*5113495bSYour Name 			bool force_free, bool spcl_pool)
7242*5113495bSYour Name {
7243*5113495bSYour Name 	uint8_t i, num_pool;
7244*5113495bSYour Name 	uint32_t j;
7245*5113495bSYour Name 	uint32_t num_desc_t, page_id, offset;
7246*5113495bSYour Name 	uint16_t num_desc_per_page;
7247*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
7248*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = NULL;
7249*5113495bSYour Name 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
7250*5113495bSYour Name 
7251*5113495bSYour Name 	if (!vdev && !force_free) {
7252*5113495bSYour Name 		dp_err("Reset TX desc vdev, Vdev param is required!");
7253*5113495bSYour Name 		return;
7254*5113495bSYour Name 	}
7255*5113495bSYour Name 
7256*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7257*5113495bSYour Name 
7258*5113495bSYour Name 	for (i = 0; i < num_pool; i++) {
7259*5113495bSYour Name 		tx_desc_pool = spcl_pool ? dp_get_spcl_tx_desc_pool(soc, i) :
7260*5113495bSYour Name 						dp_get_tx_desc_pool(soc, i);
7261*5113495bSYour Name 
7262*5113495bSYour Name 		num_desc_t = tx_desc_pool->elem_count;
7263*5113495bSYour Name 		if (!tx_desc_pool->desc_pages.cacheable_pages)
7264*5113495bSYour Name 			continue;
7265*5113495bSYour Name 
7266*5113495bSYour Name 		num_desc_per_page =
7267*5113495bSYour Name 			tx_desc_pool->desc_pages.num_element_per_page;
7268*5113495bSYour Name 		for (j = 0; j < num_desc_t; j++) {
7269*5113495bSYour Name 			page_id = j / num_desc_per_page;
7270*5113495bSYour Name 			offset = j % num_desc_per_page;
7271*5113495bSYour Name 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset,
7272*5113495bSYour Name 						  spcl_pool);
7273*5113495bSYour Name 
7274*5113495bSYour Name 			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
7275*5113495bSYour Name 				if (force_free) {
7276*5113495bSYour Name 					dp_tx_comp_free_buf(soc, tx_desc,
7277*5113495bSYour Name 							    false);
7278*5113495bSYour Name 					dp_tx_desc_release(soc, tx_desc, i);
7279*5113495bSYour Name 				} else {
7280*5113495bSYour Name 					dp_tx_desc_reset_vdev(soc, tx_desc,
7281*5113495bSYour Name 							      i, spcl_pool);
7282*5113495bSYour Name 				}
7283*5113495bSYour Name 			}
7284*5113495bSYour Name 		}
7285*5113495bSYour Name 	}
7286*5113495bSYour Name }
7287*5113495bSYour Name 
7288*5113495bSYour Name void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
7289*5113495bSYour Name 		      bool force_free)
7290*5113495bSYour Name {
7291*5113495bSYour Name 	__dp_tx_desc_flush(pdev, vdev, force_free, false);
7292*5113495bSYour Name 	__dp_tx_desc_flush(pdev, vdev, force_free, true);
7293*5113495bSYour Name }
7294*5113495bSYour Name #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
7295*5113495bSYour Name 
7296*5113495bSYour Name QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
7297*5113495bSYour Name {
7298*5113495bSYour Name 	struct dp_pdev *pdev = vdev->pdev;
7299*5113495bSYour Name 
	/* Reset the vdev id in all TX descriptors associated with this vdev */
7301*5113495bSYour Name 	dp_tx_desc_flush(pdev, vdev, false);
7302*5113495bSYour Name 
7303*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7304*5113495bSYour Name }
7305*5113495bSYour Name 
7306*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
7307*5113495bSYour Name /* Pools will be allocated dynamically */
7308*5113495bSYour Name static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
7309*5113495bSYour Name 					   int num_desc)
7310*5113495bSYour Name {
7311*5113495bSYour Name 	uint8_t i;
7312*5113495bSYour Name 
7313*5113495bSYour Name 	for (i = 0; i < num_pool; i++) {
7314*5113495bSYour Name 		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
7315*5113495bSYour Name 		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
7316*5113495bSYour Name 	}
7317*5113495bSYour Name 
7318*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7319*5113495bSYour Name }
7320*5113495bSYour Name 
7321*5113495bSYour Name static QDF_STATUS dp_tx_spcl_alloc_static_pools(struct dp_soc *soc,
7322*5113495bSYour Name 						int num_pool,
7323*5113495bSYour Name 						int num_spcl_desc)
7324*5113495bSYour Name {
7325*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7326*5113495bSYour Name }
7327*5113495bSYour Name 
7328*5113495bSYour Name static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
7329*5113495bSYour Name 					  uint32_t num_desc)
7330*5113495bSYour Name {
7331*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7332*5113495bSYour Name }
7333*5113495bSYour Name 
7334*5113495bSYour Name static QDF_STATUS dp_tx_spcl_init_static_pools(struct dp_soc *soc, int num_pool,
7335*5113495bSYour Name 					       uint32_t num_spcl_desc)
7336*5113495bSYour Name {
7337*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7338*5113495bSYour Name }
7339*5113495bSYour Name 
7340*5113495bSYour Name static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
7341*5113495bSYour Name {
7342*5113495bSYour Name }
7343*5113495bSYour Name 
7344*5113495bSYour Name static void dp_tx_spcl_deinit_static_pools(struct dp_soc *soc, int num_pool)
7345*5113495bSYour Name {
7346*5113495bSYour Name }
7347*5113495bSYour Name static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
7348*5113495bSYour Name {
7349*5113495bSYour Name 	uint8_t i;
7350*5113495bSYour Name 
7351*5113495bSYour Name 	for (i = 0; i < num_pool; i++)
7352*5113495bSYour Name 		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
7353*5113495bSYour Name }
7354*5113495bSYour Name 
7355*5113495bSYour Name static void dp_tx_spcl_delete_static_pools(struct dp_soc *soc, int num_pool)
7356*5113495bSYour Name {
7357*5113495bSYour Name }
7358*5113495bSYour Name #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
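/**
 * dp_tx_alloc_static_pools() - allocate the static Tx descriptor pools
 * @soc: DP soc handle
 * @num_pool: number of pools to allocate
 * @num_desc: number of descriptors per pool
 *
 * Allocation is reference counted per arch through the global DP context,
 * so the pools are created only by the first soc of a given arch.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */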
7359*5113495bSYour Name static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
7360*5113495bSYour Name 					   uint32_t num_desc)
7361*5113495bSYour Name {
7362*5113495bSYour Name 	uint8_t i, count;
7363*5113495bSYour Name 	struct dp_global_context *dp_global;
7364*5113495bSYour Name 
7365*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7366*5113495bSYour Name 
7367*5113495bSYour Name 	/* Allocate software Tx descriptor pools */
7368*5113495bSYour Name 
7369*5113495bSYour Name 	if (dp_global->tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
		for (i = 0; i < num_pool; i++) {
			if (dp_tx_desc_pool_alloc(soc, i, num_desc, false)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Tx Desc Pool alloc %d failed %pK"),
					  i, soc);
				goto fail;
			}
		}
7378*5113495bSYour Name 	}
7379*5113495bSYour Name 	dp_global->tx_desc_pool_alloc_cnt[soc->arch_id]++;
7380*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7381*5113495bSYour Name 
7382*5113495bSYour Name fail:
7383*5113495bSYour Name 	for (count = 0; count < i; count++)
7384*5113495bSYour Name 		dp_tx_desc_pool_free(soc, count, false);
7385*5113495bSYour Name 	return QDF_STATUS_E_NOMEM;
7386*5113495bSYour Name }
7387*5113495bSYour Name 
7388*5113495bSYour Name static QDF_STATUS dp_tx_spcl_alloc_static_pools(struct dp_soc *soc,
7389*5113495bSYour Name 						int num_pool,
7390*5113495bSYour Name 						uint32_t num_spcl_desc)
7391*5113495bSYour Name {
7392*5113495bSYour Name 	uint8_t j, count;
7393*5113495bSYour Name 	struct dp_global_context *dp_global;
7394*5113495bSYour Name 
7395*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7396*5113495bSYour Name 
7397*5113495bSYour Name 	/* Allocate software Tx descriptor pools */
7398*5113495bSYour Name 	if (dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
7399*5113495bSYour Name 		for (j = 0; j < num_pool; j++) {
7400*5113495bSYour Name 			if (dp_tx_desc_pool_alloc(soc, j, num_spcl_desc, true)) {
7401*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_DP,
7402*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
7403*5113495bSYour Name 					  FL("Tx special Desc Pool alloc %d failed %pK"),
7404*5113495bSYour Name 					      j, soc);
7405*5113495bSYour Name 				goto fail;
7406*5113495bSYour Name 			}
7407*5113495bSYour Name 		}
7408*5113495bSYour Name 	}
7409*5113495bSYour Name 	dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id]++;
7410*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7411*5113495bSYour Name 
7412*5113495bSYour Name fail:
7413*5113495bSYour Name 	for (count = 0; count < j; count++)
7414*5113495bSYour Name 		dp_tx_desc_pool_free(soc, count, true);
7415*5113495bSYour Name 	return QDF_STATUS_E_NOMEM;
7416*5113495bSYour Name }
7417*5113495bSYour Name 
7418*5113495bSYour Name static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
7419*5113495bSYour Name 					  uint32_t num_desc)
7420*5113495bSYour Name {
7421*5113495bSYour Name 	uint8_t i;
7422*5113495bSYour Name 	struct dp_global_context *dp_global;
7423*5113495bSYour Name 
7424*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7425*5113495bSYour Name 
7426*5113495bSYour Name 	if (dp_global->tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7427*5113495bSYour Name 		for (i = 0; i < num_pool; i++) {
7428*5113495bSYour Name 			if (dp_tx_desc_pool_init(soc, i, num_desc, false)) {
7429*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_DP,
7430*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
7431*5113495bSYour Name 					  FL("Tx Desc Pool init %d failed %pK"),
7432*5113495bSYour Name 					  i, soc);
7433*5113495bSYour Name 				return QDF_STATUS_E_NOMEM;
7434*5113495bSYour Name 			}
7435*5113495bSYour Name 		}
7436*5113495bSYour Name 	}
7437*5113495bSYour Name 	dp_global->tx_desc_pool_init_cnt[soc->arch_id]++;
7438*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7439*5113495bSYour Name }
7440*5113495bSYour Name 
7441*5113495bSYour Name static QDF_STATUS dp_tx_spcl_init_static_pools(struct dp_soc *soc, int num_pool,
7442*5113495bSYour Name 					       uint32_t num_spcl_desc)
7443*5113495bSYour Name {
7444*5113495bSYour Name 	uint8_t i;
7445*5113495bSYour Name 	struct dp_global_context *dp_global;
7446*5113495bSYour Name 
7447*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7448*5113495bSYour Name 
7449*5113495bSYour Name 	if (dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7450*5113495bSYour Name 		for (i = 0; i < num_pool; i++) {
7451*5113495bSYour Name 			if (dp_tx_desc_pool_init(soc, i, num_spcl_desc, true)) {
7452*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_DP,
7453*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
7454*5113495bSYour Name 					  FL("Tx special Desc Pool init %d failed %pK"),
7455*5113495bSYour Name 					  i, soc);
7456*5113495bSYour Name 				return QDF_STATUS_E_NOMEM;
7457*5113495bSYour Name 			}
7458*5113495bSYour Name 		}
7459*5113495bSYour Name 	}
7460*5113495bSYour Name 	dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id]++;
7461*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7462*5113495bSYour Name }
7463*5113495bSYour Name 
7464*5113495bSYour Name static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
7465*5113495bSYour Name {
7466*5113495bSYour Name 	uint8_t i;
7467*5113495bSYour Name 	struct dp_global_context *dp_global;
7468*5113495bSYour Name 
7469*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7470*5113495bSYour Name 
7471*5113495bSYour Name 	dp_global->tx_desc_pool_init_cnt[soc->arch_id]--;
7472*5113495bSYour Name 	if (dp_global->tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7473*5113495bSYour Name 		for (i = 0; i < num_pool; i++)
7474*5113495bSYour Name 			dp_tx_desc_pool_deinit(soc, i, false);
7475*5113495bSYour Name 	}
7476*5113495bSYour Name }
7477*5113495bSYour Name 
7478*5113495bSYour Name static void dp_tx_spcl_deinit_static_pools(struct dp_soc *soc, int num_pool)
7479*5113495bSYour Name {
7480*5113495bSYour Name 	uint8_t i;
7481*5113495bSYour Name 	struct dp_global_context *dp_global;
7482*5113495bSYour Name 
7483*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7484*5113495bSYour Name 
7485*5113495bSYour Name 	dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id]--;
7486*5113495bSYour Name 	if (dp_global->spcl_tx_desc_pool_init_cnt[soc->arch_id] == 0) {
7487*5113495bSYour Name 		for (i = 0; i < num_pool; i++)
7488*5113495bSYour Name 			dp_tx_desc_pool_deinit(soc, i, true);
7489*5113495bSYour Name 	}
7490*5113495bSYour Name }
7491*5113495bSYour Name 
7492*5113495bSYour Name static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
7493*5113495bSYour Name {
7494*5113495bSYour Name 	uint8_t i;
7495*5113495bSYour Name 	struct dp_global_context *dp_global;
7496*5113495bSYour Name 
7497*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7498*5113495bSYour Name 
7499*5113495bSYour Name 	dp_global->tx_desc_pool_alloc_cnt[soc->arch_id]--;
7500*5113495bSYour Name 	if (dp_global->tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
7501*5113495bSYour Name 		for (i = 0; i < num_pool; i++)
7502*5113495bSYour Name 			dp_tx_desc_pool_free(soc, i, false);
7503*5113495bSYour Name 	}
7504*5113495bSYour Name }
7505*5113495bSYour Name 
7506*5113495bSYour Name static void dp_tx_spcl_delete_static_pools(struct dp_soc *soc, int num_pool)
7507*5113495bSYour Name {
7508*5113495bSYour Name 	uint8_t i;
7509*5113495bSYour Name 	struct dp_global_context *dp_global;
7510*5113495bSYour Name 
7511*5113495bSYour Name 	dp_global = wlan_objmgr_get_global_ctx();
7512*5113495bSYour Name 
7513*5113495bSYour Name 	dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id]--;
7514*5113495bSYour Name 	if (dp_global->spcl_tx_desc_pool_alloc_cnt[soc->arch_id] == 0) {
7515*5113495bSYour Name 		for (i = 0; i < num_pool; i++)
7516*5113495bSYour Name 			dp_tx_desc_pool_free(soc, i, true);
7517*5113495bSYour Name 	}
7518*5113495bSYour Name }
7519*5113495bSYour Name #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
7520*5113495bSYour Name 
7521*5113495bSYour Name /**
7522*5113495bSYour Name  * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
7523*5113495bSYour Name  * @soc: core txrx main context
7524*5113495bSYour Name  * @num_pool: number of pools
7525*5113495bSYour Name  *
7526*5113495bSYour Name  */
7527*5113495bSYour Name static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
7528*5113495bSYour Name {
7529*5113495bSYour Name 	dp_tx_tso_desc_pool_deinit(soc, num_pool);
7530*5113495bSYour Name 	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
7531*5113495bSYour Name }
7532*5113495bSYour Name 
7533*5113495bSYour Name /**
7534*5113495bSYour Name  * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
7535*5113495bSYour Name  * @soc: core txrx main context
7536*5113495bSYour Name  * @num_pool: number of pools
7537*5113495bSYour Name  *
7538*5113495bSYour Name  */
7539*5113495bSYour Name static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
7540*5113495bSYour Name {
7541*5113495bSYour Name 	dp_tx_tso_desc_pool_free(soc, num_pool);
7542*5113495bSYour Name 	dp_tx_tso_num_seg_pool_free(soc, num_pool);
7543*5113495bSYour Name }
7544*5113495bSYour Name 
7545*5113495bSYour Name #ifndef WLAN_SOFTUMAC_SUPPORT
7546*5113495bSYour Name void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
7547*5113495bSYour Name {
7548*5113495bSYour Name 	uint8_t num_pool, num_ext_pool;
7549*5113495bSYour Name 
7550*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7551*5113495bSYour Name 		return;
7552*5113495bSYour Name 
7553*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7554*5113495bSYour Name 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7555*5113495bSYour Name 
7556*5113495bSYour Name 	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
7557*5113495bSYour Name 	dp_tx_ext_desc_pool_free(soc, num_ext_pool);
7558*5113495bSYour Name 	dp_tx_delete_static_pools(soc, num_pool);
7559*5113495bSYour Name 	dp_tx_spcl_delete_static_pools(soc, num_pool);
7560*5113495bSYour Name }
7561*5113495bSYour Name 
7562*5113495bSYour Name void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
7563*5113495bSYour Name {
7564*5113495bSYour Name 	uint8_t num_pool, num_ext_pool;
7565*5113495bSYour Name 
7566*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7567*5113495bSYour Name 		return;
7568*5113495bSYour Name 
7569*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7570*5113495bSYour Name 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7571*5113495bSYour Name 
7572*5113495bSYour Name 	dp_tx_flow_control_deinit(soc);
7573*5113495bSYour Name 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
7574*5113495bSYour Name 	dp_tx_ext_desc_pool_deinit(soc, num_ext_pool);
7575*5113495bSYour Name 	dp_tx_deinit_static_pools(soc, num_pool);
7576*5113495bSYour Name 	dp_tx_spcl_deinit_static_pools(soc, num_pool);
7577*5113495bSYour Name }
7578*5113495bSYour Name #else
7579*5113495bSYour Name void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
7580*5113495bSYour Name {
7581*5113495bSYour Name 	uint8_t num_pool;
7582*5113495bSYour Name 
7583*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7584*5113495bSYour Name 		return;
7585*5113495bSYour Name 
7586*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7587*5113495bSYour Name 
7588*5113495bSYour Name 	dp_tx_delete_static_pools(soc, num_pool);
7589*5113495bSYour Name 	dp_tx_spcl_delete_static_pools(soc, num_pool);
7590*5113495bSYour Name }
7591*5113495bSYour Name 
7592*5113495bSYour Name void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
7593*5113495bSYour Name {
7594*5113495bSYour Name 	uint8_t num_pool;
7595*5113495bSYour Name 
7596*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7597*5113495bSYour Name 		return;
7598*5113495bSYour Name 
7599*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7600*5113495bSYour Name 
7601*5113495bSYour Name 	dp_tx_flow_control_deinit(soc);
7602*5113495bSYour Name 	dp_tx_deinit_static_pools(soc, num_pool);
7603*5113495bSYour Name 	dp_tx_spcl_deinit_static_pools(soc, num_pool);
7604*5113495bSYour Name }
7605*5113495bSYour Name #endif /*WLAN_SOFTUMAC_SUPPORT*/
7606*5113495bSYour Name 
7607*5113495bSYour Name /**
7608*5113495bSYour Name  * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
7609*5113495bSYour Name  * @soc: DP soc handle
7610*5113495bSYour Name  * @num_pool: Number of pools
7611*5113495bSYour Name  * @num_desc: Number of descriptors
7612*5113495bSYour Name  *
7613*5113495bSYour Name  * Reserve TSO descriptor buffers
7614*5113495bSYour Name  *
7615*5113495bSYour Name  * Return: QDF_STATUS_E_FAILURE on failure or
7616*5113495bSYour Name  *         QDF_STATUS_SUCCESS on success
7617*5113495bSYour Name  */
7618*5113495bSYour Name static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
7619*5113495bSYour Name 						uint8_t num_pool,
7620*5113495bSYour Name 						uint32_t num_desc)
7621*5113495bSYour Name {
7622*5113495bSYour Name 	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
7623*5113495bSYour Name 		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
7624*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
7625*5113495bSYour Name 	}
7626*5113495bSYour Name 
7627*5113495bSYour Name 	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
7628*5113495bSYour Name 		dp_err("TSO Num of seg Pool alloc %d failed %pK",
7629*5113495bSYour Name 		       num_pool, soc);
7630*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
7631*5113495bSYour Name 	}
7632*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7633*5113495bSYour Name }
7634*5113495bSYour Name 
7635*5113495bSYour Name /**
7636*5113495bSYour Name  * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
7637*5113495bSYour Name  * @soc: DP soc handle
7638*5113495bSYour Name  * @num_pool: Number of pools
7639*5113495bSYour Name  * @num_desc: Number of descriptors
7640*5113495bSYour Name  *
7641*5113495bSYour Name  * Initialize TSO descriptor pools
7642*5113495bSYour Name  *
7643*5113495bSYour Name  * Return: QDF_STATUS_E_FAILURE on failure or
7644*5113495bSYour Name  *         QDF_STATUS_SUCCESS on success
7645*5113495bSYour Name  */
7647*5113495bSYour Name static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
7648*5113495bSYour Name 					       uint8_t num_pool,
7649*5113495bSYour Name 					       uint32_t num_desc)
7650*5113495bSYour Name {
7651*5113495bSYour Name 	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
7652*5113495bSYour Name 		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
7653*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
7654*5113495bSYour Name 	}
7655*5113495bSYour Name 
7656*5113495bSYour Name 	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
7657*5113495bSYour Name 		dp_err("TSO Num of seg Pool init %d failed %pK",
7658*5113495bSYour Name 		       num_pool, soc);
7659*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
7660*5113495bSYour Name 	}
7661*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7662*5113495bSYour Name }
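
/*
 * Illustrative only, not part of the driver: the two TSO common pool
 * helpers above are meant to be called in alloc -> init order on the
 * attach side and deinit -> free order on the detach side (see
 * dp_tso_soc_attach()/dp_tso_soc_detach() later in this file). A
 * minimal sketch of the attach-side pairing, assuming the caller owns
 * a valid dp_soc; the unwind on init failure is an assumption, not
 * code taken from this file:
 *
 *	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_desc))
 *		return QDF_STATUS_E_FAILURE;
 *
 *	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_desc)) {
 *		dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 */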
7663*5113495bSYour Name 
7664*5113495bSYour Name #ifndef WLAN_SOFTUMAC_SUPPORT
7665*5113495bSYour Name QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
7666*5113495bSYour Name {
7667*5113495bSYour Name 	uint8_t num_pool, num_ext_pool;
7668*5113495bSYour Name 	uint32_t num_desc;
7669*5113495bSYour Name 	uint32_t num_spcl_desc;
7670*5113495bSYour Name 	uint32_t num_ext_desc;
7671*5113495bSYour Name 
7672*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7673*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
7674*5113495bSYour Name 
7675*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7676*5113495bSYour Name 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7677*5113495bSYour Name 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7678*5113495bSYour Name 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7679*5113495bSYour Name 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
7680*5113495bSYour Name 
7681*5113495bSYour Name 	dp_info("Tx Desc Alloc num_pool: %d descs: %d", num_pool, num_desc);
7682*5113495bSYour Name 
7683*5113495bSYour Name 	if ((num_pool > MAX_TXDESC_POOLS) ||
7684*5113495bSYour Name 	    (num_ext_pool > MAX_TXDESC_POOLS) ||
7685*5113495bSYour Name 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX) ||
7686*5113495bSYour Name 	    (num_spcl_desc > WLAN_CFG_NUM_TX_SPL_DESC_MAX))
7687*5113495bSYour Name 		goto fail1;
7688*5113495bSYour Name 
7689*5113495bSYour Name 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
7690*5113495bSYour Name 		goto fail1;
7691*5113495bSYour Name 
7692*5113495bSYour Name 	if (dp_tx_spcl_alloc_static_pools(soc, num_pool, num_spcl_desc))
7693*5113495bSYour Name 		goto fail2;
7694*5113495bSYour Name 
7695*5113495bSYour Name 	if (dp_tx_ext_desc_pool_alloc(soc, num_ext_pool, num_ext_desc))
7696*5113495bSYour Name 		goto fail3;
7697*5113495bSYour Name 
7698*5113495bSYour Name 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
7699*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
7700*5113495bSYour Name 
7701*5113495bSYour Name 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_ext_pool, num_ext_desc))
7702*5113495bSYour Name 		goto fail4;
7703*5113495bSYour Name 
7704*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7705*5113495bSYour Name 
7706*5113495bSYour Name fail4:
7707*5113495bSYour Name 	dp_tx_ext_desc_pool_free(soc, num_ext_pool);
7708*5113495bSYour Name fail3:
7709*5113495bSYour Name 	dp_tx_spcl_delete_static_pools(soc, num_pool);
7710*5113495bSYour Name fail2:
7711*5113495bSYour Name 	dp_tx_delete_static_pools(soc, num_pool);
7712*5113495bSYour Name fail1:
7713*5113495bSYour Name 	return QDF_STATUS_E_RESOURCES;
7714*5113495bSYour Name }
7715*5113495bSYour Name 
7716*5113495bSYour Name QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
7717*5113495bSYour Name {
7718*5113495bSYour Name 	uint8_t num_pool, num_ext_pool;
7719*5113495bSYour Name 	uint32_t num_desc;
7720*5113495bSYour Name 	uint32_t num_spcl_desc;
7721*5113495bSYour Name 	uint32_t num_ext_desc;
7722*5113495bSYour Name 
7723*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7724*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
7725*5113495bSYour Name 
7726*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7727*5113495bSYour Name 	num_ext_pool = dp_get_ext_tx_desc_pool_num(soc);
7728*5113495bSYour Name 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7729*5113495bSYour Name 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7730*5113495bSYour Name 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
7731*5113495bSYour Name 
7732*5113495bSYour Name 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
7733*5113495bSYour Name 		goto fail1;
7734*5113495bSYour Name 
7735*5113495bSYour Name 	if (dp_tx_spcl_init_static_pools(soc, num_pool, num_spcl_desc))
7736*5113495bSYour Name 		goto fail2;
7737*5113495bSYour Name 
7738*5113495bSYour Name 	if (dp_tx_ext_desc_pool_init(soc, num_ext_pool, num_ext_desc))
7739*5113495bSYour Name 		goto fail3;
7740*5113495bSYour Name 
7741*5113495bSYour Name 	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
7742*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
7743*5113495bSYour Name 
7744*5113495bSYour Name 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_ext_pool, num_ext_desc))
7745*5113495bSYour Name 		goto fail4;
7746*5113495bSYour Name 
7747*5113495bSYour Name 	dp_tx_flow_control_init(soc);
7748*5113495bSYour Name 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
7749*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7750*5113495bSYour Name 
7751*5113495bSYour Name fail4:
7752*5113495bSYour Name 	dp_tx_ext_desc_pool_deinit(soc, num_ext_pool);
7753*5113495bSYour Name fail3:
7754*5113495bSYour Name 	dp_tx_spcl_deinit_static_pools(soc, num_pool);
7755*5113495bSYour Name fail2:
7756*5113495bSYour Name 	dp_tx_deinit_static_pools(soc, num_pool);
7757*5113495bSYour Name fail1:
7758*5113495bSYour Name 	return QDF_STATUS_E_RESOURCES;
7759*5113495bSYour Name }
7760*5113495bSYour Name 
7761*5113495bSYour Name #else
7762*5113495bSYour Name QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
7763*5113495bSYour Name {
7764*5113495bSYour Name 	uint8_t num_pool;
7765*5113495bSYour Name 	uint32_t num_desc;
7766*5113495bSYour Name 	uint32_t num_spcl_desc;
7767*5113495bSYour Name 
7768*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7769*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
7770*5113495bSYour Name 
7771*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7772*5113495bSYour Name 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7773*5113495bSYour Name 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7774*5113495bSYour Name 
7775*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7776*5113495bSYour Name 		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
7777*5113495bSYour Name 		  __func__, num_pool, num_desc);
7778*5113495bSYour Name 
7779*5113495bSYour Name 	if ((num_pool > MAX_TXDESC_POOLS) ||
7780*5113495bSYour Name 	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX) ||
7781*5113495bSYour Name 	    (num_spcl_desc > WLAN_CFG_NUM_TX_SPL_DESC_MAX))
7782*5113495bSYour Name 		goto fail1;
7783*5113495bSYour Name 
7784*5113495bSYour Name 	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
7785*5113495bSYour Name 		goto fail1;
7786*5113495bSYour Name 
7787*5113495bSYour Name 	if (dp_tx_spcl_alloc_static_pools(soc, num_pool, num_spcl_desc))
7788*5113495bSYour Name 		goto fail2;
7789*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7790*5113495bSYour Name 
7791*5113495bSYour Name fail2:
7792*5113495bSYour Name 	dp_tx_delete_static_pools(soc, num_pool);
7793*5113495bSYour Name fail1:
7794*5113495bSYour Name 	return QDF_STATUS_E_RESOURCES;
7795*5113495bSYour Name }
7796*5113495bSYour Name 
7797*5113495bSYour Name QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
7798*5113495bSYour Name {
7799*5113495bSYour Name 	uint8_t num_pool;
7800*5113495bSYour Name 	uint32_t num_desc;
7801*5113495bSYour Name 	uint32_t num_spcl_desc;
7802*5113495bSYour Name 
7803*5113495bSYour Name 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7804*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
7805*5113495bSYour Name 
7806*5113495bSYour Name 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
7807*5113495bSYour Name 	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
7808*5113495bSYour Name 	num_spcl_desc = wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
7809*5113495bSYour Name 
7810*5113495bSYour Name 	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
7811*5113495bSYour Name 		goto fail;
7812*5113495bSYour Name 
7813*5113495bSYour Name 	if (dp_tx_spcl_init_static_pools(soc, num_pool, num_spcl_desc))
7814*5113495bSYour Name 		goto fail1;
7815*5113495bSYour Name 
7816*5113495bSYour Name 	dp_tx_flow_control_init(soc);
7817*5113495bSYour Name 	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
7818*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7819*5113495bSYour Name fail1:
7820*5113495bSYour Name 	dp_tx_deinit_static_pools(soc, num_pool);
7821*5113495bSYour Name fail:
7822*5113495bSYour Name 	return QDF_STATUS_E_RESOURCES;
7823*5113495bSYour Name }
7824*5113495bSYour Name #endif
7825*5113495bSYour Name 
7826*5113495bSYour Name QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
7827*5113495bSYour Name {
7828*5113495bSYour Name 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7829*5113495bSYour Name 	uint8_t num_ext_desc_pool;
7830*5113495bSYour Name 	uint32_t num_ext_desc;
7831*5113495bSYour Name 
7832*5113495bSYour Name 	num_ext_desc_pool = dp_get_ext_tx_desc_pool_num(soc);
7833*5113495bSYour Name 	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
7834*5113495bSYour Name 
7835*5113495bSYour Name 	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_ext_desc_pool, num_ext_desc))
7836*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
7837*5113495bSYour Name 
7838*5113495bSYour Name 	if (dp_tx_tso_cmn_desc_pool_init(soc, num_ext_desc_pool, num_ext_desc))
7839*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
7840*5113495bSYour Name 
7841*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7842*5113495bSYour Name }
7843*5113495bSYour Name 
7844*5113495bSYour Name QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
7845*5113495bSYour Name {
7846*5113495bSYour Name 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7847*5113495bSYour Name 	uint8_t num_ext_desc_pool = dp_get_ext_tx_desc_pool_num(soc);
7848*5113495bSYour Name 
7849*5113495bSYour Name 	dp_tx_tso_cmn_desc_pool_deinit(soc, num_ext_desc_pool);
7850*5113495bSYour Name 	dp_tx_tso_cmn_desc_pool_free(soc, num_ext_desc_pool);
7851*5113495bSYour Name 
7852*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
7853*5113495bSYour Name }
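
/*
 * Illustrative only: when wlan_cfg_is_tso_desc_attach_defer() is set,
 * dp_soc_tx_desc_sw_pools_alloc()/init() above skip the TSO pools and
 * a control-path caller is expected to bring them up later through
 * dp_tso_soc_attach() and tear them down with dp_tso_soc_detach().
 * A hedged sketch of such a deferred call site; the exact location is
 * an assumption, not taken from this file:
 *
 *	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx) &&
 *	    dp_tso_soc_attach((struct cdp_soc_t *)soc) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */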
7854*5113495bSYour Name 
7855*5113495bSYour Name #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
7856*5113495bSYour Name void dp_pkt_add_timestamp(struct dp_vdev *vdev,
7857*5113495bSYour Name 			  enum qdf_pkt_timestamp_index index, uint64_t time,
7858*5113495bSYour Name 			  qdf_nbuf_t nbuf)
7859*5113495bSYour Name {
7860*5113495bSYour Name 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
7861*5113495bSYour Name 		uint64_t tsf_time;
7862*5113495bSYour Name 
7863*5113495bSYour Name 		if (vdev->get_tsf_time) {
7864*5113495bSYour Name 			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
7865*5113495bSYour Name 			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
7866*5113495bSYour Name 		}
7867*5113495bSYour Name 	}
7868*5113495bSYour Name }
7869*5113495bSYour Name 
7870*5113495bSYour Name void dp_pkt_get_timestamp(uint64_t *time)
7871*5113495bSYour Name {
7872*5113495bSYour Name 	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
7873*5113495bSYour Name 		*time = qdf_get_log_timestamp();
7874*5113495bSYour Name }
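
/*
 * Illustrative only: dp_pkt_get_timestamp() and dp_pkt_add_timestamp()
 * are designed to be used as a pair in the TX path. A minimal sketch;
 * QDF_PKT_TX_DRIVER_ENTRY stands for one qdf_pkt_timestamp_index
 * value, and which index a given call site uses is an assumption:
 *
 *	uint64_t time;
 *
 *	dp_pkt_get_timestamp(&time);
 *	... driver TX processing ...
 *	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_ENTRY, time, nbuf);
 */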
7875*5113495bSYour Name #endif
7876*5113495bSYour Name 
7877*5113495bSYour Name #ifdef QCA_MULTIPASS_SUPPORT
7878*5113495bSYour Name void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
7879*5113495bSYour Name 				 struct dp_tx_msdu_info_s *msdu_info,
7880*5113495bSYour Name 				 uint16_t group_key)
7881*5113495bSYour Name {
7882*5113495bSYour Name 	struct htt_tx_msdu_desc_ext2_t *meta_data =
7883*5113495bSYour Name 		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
7884*5113495bSYour Name 
7885*5113495bSYour Name 	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
7886*5113495bSYour Name 
7887*5113495bSYour Name 	/*
7888*5113495bSYour Name 	 * When sending a multicast packet with multi-passphrase, the host
7889*5113495bSYour Name 	 * shall add the HTT EXT meta data "struct htt_tx_msdu_desc_ext2_t"
7890*5113495bSYour Name 	 * (see htt.h), with the group_id carried in "key_flags" and
7891*5113495bSYour Name 	 * "valid_key_flags" set to 1, i.e. key_flags = group_key_ix.
7892*5113495bSYour Name 	 */
7893*5113495bSYour Name 	HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0],
7894*5113495bSYour Name 						       1);
7895*5113495bSYour Name 	HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
7896*5113495bSYour Name }
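
/*
 * Illustrative only: a caller that has resolved the group key index for
 * the frame's VLAN (as dp_tx_multipass_process() does below through
 * vdev->iv_vlan_map) wires it into the per-MSDU HTT meta data like so:
 *
 *	group_key = vdev->iv_vlan_map[vlan_id];
 *	if (group_key)
 *		dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
 */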
7897*5113495bSYour Name 
7898*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
7899*5113495bSYour Name 	defined(WLAN_MCAST_MLO)
7900*5113495bSYour Name /**
7901*5113495bSYour Name  * dp_tx_need_mcast_reinject() - check if the frame needs to go through the reinject path
7902*5113495bSYour Name  * @vdev: DP vdev handle
7903*5113495bSYour Name  *
7904*5113495bSYour Name  * Return: true if reinject handling is required else false
7905*5113495bSYour Name  */
7906*5113495bSYour Name static inline bool
7907*5113495bSYour Name dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
7908*5113495bSYour Name {
7909*5113495bSYour Name 	if (vdev->mlo_vdev && vdev->opmode == wlan_op_mode_ap)
7910*5113495bSYour Name 		return true;
7911*5113495bSYour Name 
7912*5113495bSYour Name 	return false;
7913*5113495bSYour Name }
7914*5113495bSYour Name #else
7915*5113495bSYour Name static inline bool
7916*5113495bSYour Name dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
7917*5113495bSYour Name {
7918*5113495bSYour Name 	return false;
7919*5113495bSYour Name }
7920*5113495bSYour Name #endif
7921*5113495bSYour Name 
7922*5113495bSYour Name /**
7923*5113495bSYour Name  * dp_tx_need_multipass_process() - check if the frame needs multi-passphrase processing
7924*5113495bSYour Name  * @soc: dp soc handle
7925*5113495bSYour Name  * @vdev: DP vdev handle
7926*5113495bSYour Name  * @buf: frame
7927*5113495bSYour Name  * @vlan_id: vlan id of frame
7928*5113495bSYour Name  *
7929*5113495bSYour Name  * Return: DP_VLAN_TAGGED_MULTICAST/DP_VLAN_TAGGED_UNICAST for a special peer, else DP_VLAN_UNTAGGED
7930*5113495bSYour Name  */
7931*5113495bSYour Name static
7932*5113495bSYour Name uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
7933*5113495bSYour Name 				     qdf_nbuf_t buf, uint16_t *vlan_id)
7934*5113495bSYour Name {
7935*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
7936*5113495bSYour Name 	struct dp_peer *peer = NULL;
7937*5113495bSYour Name 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
7938*5113495bSYour Name 	struct vlan_ethhdr *veh = NULL;
7939*5113495bSYour Name 	bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
7940*5113495bSYour Name 			(ntohs(eh->ether_type) != ETH_P_8021Q));
7941*5113495bSYour Name 	struct cdp_peer_info peer_info = { 0 };
7942*5113495bSYour Name 
7943*5113495bSYour Name 	if (qdf_unlikely(not_vlan))
7944*5113495bSYour Name 		return DP_VLAN_UNTAGGED;
7945*5113495bSYour Name 
7946*5113495bSYour Name 	veh = (struct vlan_ethhdr *)eh;
7947*5113495bSYour Name 	*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
7948*5113495bSYour Name 
7949*5113495bSYour Name 	if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
7950*5113495bSYour Name 		/* look for handling of multicast packets in reinject path */
7951*5113495bSYour Name 		if (dp_tx_need_mcast_reinject(vdev))
7952*5113495bSYour Name 			return DP_VLAN_UNTAGGED;
7953*5113495bSYour Name 
7954*5113495bSYour Name 		qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
7955*5113495bSYour Name 		TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
7956*5113495bSYour Name 			      mpass_peer_list_elem) {
7957*5113495bSYour Name 			if (*vlan_id == txrx_peer->vlan_id) {
7958*5113495bSYour Name 				qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
7959*5113495bSYour Name 				return DP_VLAN_TAGGED_MULTICAST;
7960*5113495bSYour Name 			}
7961*5113495bSYour Name 		}
7962*5113495bSYour Name 		qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
7963*5113495bSYour Name 		return DP_VLAN_UNTAGGED;
7964*5113495bSYour Name 	}
7965*5113495bSYour Name 
7966*5113495bSYour Name 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, eh->ether_dhost,
7967*5113495bSYour Name 				 false, CDP_WILD_PEER_TYPE);
7968*5113495bSYour Name 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
7969*5113495bSYour Name 					 DP_MOD_ID_TX_MULTIPASS);
7970*5113495bSYour Name 	if (qdf_unlikely(!peer))
7971*5113495bSYour Name 		return DP_VLAN_UNTAGGED;
7972*5113495bSYour Name 
7973*5113495bSYour Name 	/*
7974*5113495bSYour Name 	 * When the vlan_id does not match the peer's, do not drop the
7975*5113495bSYour Name 	 * frame; send it as it is (untagged).
7976*5113495bSYour Name 	 */
7977*5113495bSYour Name 	if (*vlan_id == peer->txrx_peer->vlan_id) {
7978*5113495bSYour Name 		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
7979*5113495bSYour Name 		return DP_VLAN_TAGGED_UNICAST;
7980*5113495bSYour Name 	}
7981*5113495bSYour Name 
7982*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
7983*5113495bSYour Name 	return DP_VLAN_UNTAGGED;
7984*5113495bSYour Name }
7985*5113495bSYour Name 
7986*5113495bSYour Name #ifndef WLAN_REPEATER_NOT_SUPPORTED
7987*5113495bSYour Name static inline void
7988*5113495bSYour Name dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
7989*5113495bSYour Name 				     qdf_nbuf_t nbuf,
7990*5113495bSYour Name 				     struct dp_tx_msdu_info_s *msdu_info)
7991*5113495bSYour Name {
7992*5113495bSYour Name 	qdf_nbuf_t nbuf_copy = NULL;
7993*5113495bSYour Name 
7994*5113495bSYour Name 	/* AP can have classic clients, special clients &
7995*5113495bSYour Name 	 * classic repeaters.
7996*5113495bSYour Name 	 * 1. Classic clients & special client:
7997*5113495bSYour Name 	 *	Remove vlan header, find corresponding group key
7998*5113495bSYour Name 	 *	index, fill in metaheader and enqueue multicast
7999*5113495bSYour Name 	 *	frame to TCL.
8000*5113495bSYour Name 	 * 2. Classic repeater:
8001*5113495bSYour Name 	 *	Pass through to classic repeater with vlan tag
8002*5113495bSYour Name 	 *	intact without any group key index. Hardware
8003*5113495bSYour Name 	 *	will know which key to use to send frame to
8004*5113495bSYour Name 	 *	repeater.
8005*5113495bSYour Name 	 */
8006*5113495bSYour Name 	nbuf_copy = qdf_nbuf_copy(nbuf);
8007*5113495bSYour Name 
8008*5113495bSYour Name 	/*
8009*5113495bSYour Name 	 * Send multicast frame to special peers even
8010*5113495bSYour Name 	 * if pass through to classic repeater fails.
8011*5113495bSYour Name 	 */
8012*5113495bSYour Name 	if (nbuf_copy) {
8013*5113495bSYour Name 		struct dp_tx_msdu_info_s msdu_info_copy;
8014*5113495bSYour Name 
8015*5113495bSYour Name 		qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
8016*5113495bSYour Name 		msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
8017*5113495bSYour Name 		msdu_info_copy.xmit_type =
8018*5113495bSYour Name 			qdf_nbuf_get_vdev_xmit_type(nbuf);
8019*5113495bSYour Name 		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
8020*5113495bSYour Name 		nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
8021*5113495bSYour Name 						   &msdu_info_copy,
8022*5113495bSYour Name 						   HTT_INVALID_PEER, NULL);
8023*5113495bSYour Name 		if (nbuf_copy) {
8024*5113495bSYour Name 			qdf_nbuf_free(nbuf_copy);
8025*5113495bSYour Name 			dp_info_rl("nbuf_copy send failed");
8026*5113495bSYour Name 		}
8027*5113495bSYour Name 	}
8028*5113495bSYour Name }
8029*5113495bSYour Name #else
8030*5113495bSYour Name static inline void
8031*5113495bSYour Name dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
8032*5113495bSYour Name 				     qdf_nbuf_t nbuf,
8033*5113495bSYour Name 				     struct dp_tx_msdu_info_s *msdu_info)
8034*5113495bSYour Name {
8035*5113495bSYour Name }
8036*5113495bSYour Name #endif
8037*5113495bSYour Name 
8038*5113495bSYour Name bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
8039*5113495bSYour Name 			     qdf_nbuf_t nbuf,
8040*5113495bSYour Name 			     struct dp_tx_msdu_info_s *msdu_info)
8041*5113495bSYour Name {
8042*5113495bSYour Name 	uint16_t vlan_id = 0;
8043*5113495bSYour Name 	uint16_t group_key = 0;
8044*5113495bSYour Name 	uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;
8045*5113495bSYour Name 
8046*5113495bSYour Name 	if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0]))
8047*5113495bSYour Name 		return true;
8048*5113495bSYour Name 
8049*5113495bSYour Name 	is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);
8050*5113495bSYour Name 
8051*5113495bSYour Name 	if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
8052*5113495bSYour Name 	    (is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
8053*5113495bSYour Name 		return true;
8054*5113495bSYour Name 
8055*5113495bSYour Name 	if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
8056*5113495bSYour Name 		dp_tx_remove_vlan_tag(vdev, nbuf);
8057*5113495bSYour Name 		return true;
8058*5113495bSYour Name 	}
8059*5113495bSYour Name 
8060*5113495bSYour Name 	dp_tx_multipass_send_pkt_to_repeater(soc, vdev, nbuf, msdu_info);
8061*5113495bSYour Name 	group_key = vdev->iv_vlan_map[vlan_id];
8062*5113495bSYour Name 
8063*5113495bSYour Name 	/*
8064*5113495bSYour Name 	 * If group key is not installed, drop the frame.
8065*5113495bSYour Name 	 */
8066*5113495bSYour Name 	if (!group_key)
8067*5113495bSYour Name 		return false;
8068*5113495bSYour Name 
8069*5113495bSYour Name 	dp_tx_remove_vlan_tag(vdev, nbuf);
8070*5113495bSYour Name 	dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
8071*5113495bSYour Name 	msdu_info->exception_fw = 1;
8072*5113495bSYour Name 	return true;
8073*5113495bSYour Name }
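
/*
 * Illustrative only: in the TX path, a multipass-enabled vdev runs the
 * frame through dp_tx_multipass_process() before enqueue and drops it
 * on a false return (no group key installed for its VLAN). A hedged
 * sketch of such a call site; the surrounding control flow and the
 * drop_pkt label are assumptions, not code taken from this file:
 *
 *	if (qdf_unlikely(vdev->multipass_en) &&
 *	    !dp_tx_multipass_process(soc, vdev, nbuf, &msdu_info))
 *		goto drop_pkt;
 */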
8074*5113495bSYour Name #endif /* QCA_MULTIPASS_SUPPORT */
8075