xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_tx.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #include "cdp_txrx_cmn_struct.h"
21*5113495bSYour Name #include "dp_types.h"
22*5113495bSYour Name #include "dp_tx.h"
23*5113495bSYour Name #include "dp_be_tx.h"
24*5113495bSYour Name #include "dp_tx_desc.h"
25*5113495bSYour Name #include "hal_tx.h"
26*5113495bSYour Name #include <hal_be_api.h>
27*5113495bSYour Name #include <hal_be_tx.h>
28*5113495bSYour Name #include <dp_htt.h>
29*5113495bSYour Name #include "dp_internal.h"
30*5113495bSYour Name #ifdef FEATURE_WDS
31*5113495bSYour Name #include "dp_txrx_wds.h"
32*5113495bSYour Name #endif
33*5113495bSYour Name 
34*5113495bSYour Name #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
35*5113495bSYour Name #define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
36*5113495bSYour Name #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
37*5113495bSYour Name #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
38*5113495bSYour Name #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
39*5113495bSYour Name #else
40*5113495bSYour Name #define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
41*5113495bSYour Name #define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
42*5113495bSYour Name #define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
43*5113495bSYour Name #define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
44*5113495bSYour Name #endif
45*5113495bSYour Name 
46*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
47*5113495bSYour Name #ifdef WLAN_MCAST_MLO
48*5113495bSYour Name /* MLO peer id for reinject*/
49*5113495bSYour Name #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
50*5113495bSYour Name #define MAX_GSN_NUM 0x0FFF
51*5113495bSYour Name 
52*5113495bSYour Name #ifdef QCA_MULTIPASS_SUPPORT
53*5113495bSYour Name #define INVALID_VLAN_ID         0xFFFF
54*5113495bSYour Name #define MULTIPASS_WITH_VLAN_ID 0xFFFE
55*5113495bSYour Name /**
56*5113495bSYour Name  * struct dp_mlo_mpass_buf - Multipass buffer
57*5113495bSYour Name  * @vlan_id: vlan_id of frame
58*5113495bSYour Name  * @nbuf: pointer to skb buf
59*5113495bSYour Name  */
60*5113495bSYour Name struct dp_mlo_mpass_buf {
61*5113495bSYour Name 	uint16_t vlan_id;
62*5113495bSYour Name 	qdf_nbuf_t  nbuf;
63*5113495bSYour Name };
64*5113495bSYour Name #endif
65*5113495bSYour Name #endif
66*5113495bSYour Name #endif
67*5113495bSYour Name 
68*5113495bSYour Name #define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
69*5113495bSYour Name 	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
70*5113495bSYour Name #define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
71*5113495bSYour Name 	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
72*5113495bSYour Name #define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
73*5113495bSYour Name 	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
74*5113495bSYour Name #define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
75*5113495bSYour Name 	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
76*5113495bSYour Name #define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
77*5113495bSYour Name 	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
78*5113495bSYour Name #define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
79*5113495bSYour Name 	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
80*5113495bSYour Name #define DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(_var) \
81*5113495bSYour Name 	HTT_TX_WBM_COMPLETION_V2_TRANSMIT_CNT_VALID_GET(_var)
82*5113495bSYour Name 
83*5113495bSYour Name extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
84*5113495bSYour Name 
85*5113495bSYour Name #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
86*5113495bSYour Name /*
87*5113495bSYour Name  * Value to mark ring desc is invalidated by buffer_virt_addr_63_32 field
88*5113495bSYour Name  * of WBM2SW ring Desc.
89*5113495bSYour Name  */
90*5113495bSYour Name #define DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE 0x12121212
91*5113495bSYour Name 
92*5113495bSYour Name /**
93*5113495bSYour Name  * dp_tx_comp_desc_check_and_invalidate() - sanity check for ring desc and
94*5113495bSYour Name  *					    invalidate it after each reaping
95*5113495bSYour Name  * @tx_comp_hal_desc: ring desc virtual address
96*5113495bSYour Name  * @r_tx_desc: pointer to current dp TX Desc pointer
97*5113495bSYour Name  * @tx_desc_va: the original 64 bits Desc VA got from ring Desc
98*5113495bSYour Name  * @hw_cc_done: HW cookie conversion done or not
99*5113495bSYour Name  *
100*5113495bSYour Name  * If HW CC is done, check the buffer_virt_addr_63_32 value to know if
101*5113495bSYour Name  * ring Desc is stale or not. if HW CC is not done, then compare PA between
102*5113495bSYour Name  * ring Desc and current TX desc.
103*5113495bSYour Name  *
104*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS for success,
105*5113495bSYour Name  *	   QDF_STATUS_E_PENDING for stale entry,
106*5113495bSYour Name  *	   QDF_STATUS_E_INVAL for invalid entry.
107*5113495bSYour Name  */
108*5113495bSYour Name static inline
dp_tx_comp_desc_check_and_invalidate(void * tx_comp_hal_desc,struct dp_tx_desc_s ** r_tx_desc,uint64_t tx_desc_va,bool hw_cc_done)109*5113495bSYour Name QDF_STATUS dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
110*5113495bSYour Name 						struct dp_tx_desc_s **r_tx_desc,
111*5113495bSYour Name 						uint64_t tx_desc_va,
112*5113495bSYour Name 						bool hw_cc_done)
113*5113495bSYour Name {
114*5113495bSYour Name 	qdf_dma_addr_t desc_dma_addr;
115*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
116*5113495bSYour Name 
117*5113495bSYour Name 	if (qdf_likely(hw_cc_done)) {
118*5113495bSYour Name 		/* Check upper 32 bits */
119*5113495bSYour Name 		if (DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE ==
120*5113495bSYour Name 		    (tx_desc_va >> 32)) {
121*5113495bSYour Name 			*r_tx_desc = NULL;
122*5113495bSYour Name 			status = QDF_STATUS_E_PENDING;
123*5113495bSYour Name 		} else
124*5113495bSYour Name 			/* Invalidate the ring desc for 32 ~ 63 bits of VA */
125*5113495bSYour Name 			hal_tx_comp_set_desc_va_63_32(
126*5113495bSYour Name 				tx_comp_hal_desc,
127*5113495bSYour Name 				DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE);
128*5113495bSYour Name 	} else {
129*5113495bSYour Name 		/* Compare PA between ring desc and current TX desc stored */
130*5113495bSYour Name 		desc_dma_addr = hal_tx_comp_get_paddr(tx_comp_hal_desc);
131*5113495bSYour Name 
132*5113495bSYour Name 		if (desc_dma_addr != (*r_tx_desc)->dma_addr) {
133*5113495bSYour Name 			*r_tx_desc = NULL;
134*5113495bSYour Name 			status = QDF_STATUS_E_INVAL;
135*5113495bSYour Name 		}
136*5113495bSYour Name 	}
137*5113495bSYour Name 
138*5113495bSYour Name 	return status;
139*5113495bSYour Name }
140*5113495bSYour Name #else
141*5113495bSYour Name static inline
dp_tx_comp_desc_check_and_invalidate(void * tx_comp_hal_desc,struct dp_tx_desc_s ** r_tx_desc,uint64_t tx_desc_va,bool hw_cc_done)142*5113495bSYour Name QDF_STATUS dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
143*5113495bSYour Name 						struct dp_tx_desc_s **r_tx_desc,
144*5113495bSYour Name 						uint64_t tx_desc_va,
145*5113495bSYour Name 						bool hw_cc_done)
146*5113495bSYour Name {
147*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
148*5113495bSYour Name }
149*5113495bSYour Name #endif
150*5113495bSYour Name 
151*5113495bSYour Name #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
152*5113495bSYour Name #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
153*5113495bSYour Name QDF_STATUS
dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc * soc,void * tx_comp_hal_desc,struct dp_tx_desc_s ** r_tx_desc)154*5113495bSYour Name dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
155*5113495bSYour Name 				       void *tx_comp_hal_desc,
156*5113495bSYour Name 				       struct dp_tx_desc_s **r_tx_desc)
157*5113495bSYour Name {
158*5113495bSYour Name 	uint32_t tx_desc_id;
159*5113495bSYour Name 	uint64_t tx_desc_va = 0;
160*5113495bSYour Name 	QDF_STATUS status;
161*5113495bSYour Name 	bool hw_cc_done =
162*5113495bSYour Name 		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc);
163*5113495bSYour Name 
164*5113495bSYour Name 	if (qdf_likely(hw_cc_done)) {
165*5113495bSYour Name 		/* HW cookie conversion done */
166*5113495bSYour Name 		tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
167*5113495bSYour Name 		*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
168*5113495bSYour Name 
169*5113495bSYour Name 	} else {
170*5113495bSYour Name 		/* SW do cookie conversion to VA */
171*5113495bSYour Name 		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
172*5113495bSYour Name 		*r_tx_desc =
173*5113495bSYour Name 		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
174*5113495bSYour Name 	}
175*5113495bSYour Name 
176*5113495bSYour Name 	status = dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
177*5113495bSYour Name 						      r_tx_desc, tx_desc_va,
178*5113495bSYour Name 						      hw_cc_done);
179*5113495bSYour Name 
180*5113495bSYour Name 	if (*r_tx_desc)
181*5113495bSYour Name 		(*r_tx_desc)->peer_id =
182*5113495bSYour Name 				dp_tx_comp_get_peer_id_be(soc,
183*5113495bSYour Name 							  tx_comp_hal_desc);
184*5113495bSYour Name 
185*5113495bSYour Name 	return status;
186*5113495bSYour Name }
187*5113495bSYour Name #else
188*5113495bSYour Name QDF_STATUS
dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc * soc,void * tx_comp_hal_desc,struct dp_tx_desc_s ** r_tx_desc)189*5113495bSYour Name dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
190*5113495bSYour Name 				       void *tx_comp_hal_desc,
191*5113495bSYour Name 				       struct dp_tx_desc_s **r_tx_desc)
192*5113495bSYour Name {
193*5113495bSYour Name 	uint64_t tx_desc_va;
194*5113495bSYour Name 	QDF_STATUS status;
195*5113495bSYour Name 
196*5113495bSYour Name 	tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
197*5113495bSYour Name 	*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
198*5113495bSYour Name 
199*5113495bSYour Name 	status = dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
200*5113495bSYour Name 						      r_tx_desc, tx_desc_va,
201*5113495bSYour Name 						      true);
202*5113495bSYour Name 	if (*r_tx_desc)
203*5113495bSYour Name 		(*r_tx_desc)->peer_id =
204*5113495bSYour Name 				dp_tx_comp_get_peer_id_be(soc,
205*5113495bSYour Name 							  tx_comp_hal_desc);
206*5113495bSYour Name 
207*5113495bSYour Name 	return status;
208*5113495bSYour Name }
209*5113495bSYour Name #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
210*5113495bSYour Name #else
211*5113495bSYour Name 
212*5113495bSYour Name QDF_STATUS
dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc * soc,void * tx_comp_hal_desc,struct dp_tx_desc_s ** r_tx_desc)213*5113495bSYour Name dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
214*5113495bSYour Name 				       void *tx_comp_hal_desc,
215*5113495bSYour Name 				       struct dp_tx_desc_s **r_tx_desc)
216*5113495bSYour Name {
217*5113495bSYour Name 	uint32_t tx_desc_id;
218*5113495bSYour Name 	QDF_STATUS status;
219*5113495bSYour Name 
220*5113495bSYour Name 	/* SW do cookie conversion to VA */
221*5113495bSYour Name 	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
222*5113495bSYour Name 	*r_tx_desc =
223*5113495bSYour Name 	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
224*5113495bSYour Name 
225*5113495bSYour Name 	status = dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
226*5113495bSYour Name 						      r_tx_desc, 0, false);
227*5113495bSYour Name 
228*5113495bSYour Name 	if (*r_tx_desc)
229*5113495bSYour Name 		(*r_tx_desc)->peer_id =
230*5113495bSYour Name 				dp_tx_comp_get_peer_id_be(soc,
231*5113495bSYour Name 							  tx_comp_hal_desc);
232*5113495bSYour Name 
233*5113495bSYour Name 	return status;
234*5113495bSYour Name }
235*5113495bSYour Name #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
236*5113495bSYour Name 
237*5113495bSYour Name static inline
dp_tx_process_mec_notify_be(struct dp_soc * soc,uint8_t * status)238*5113495bSYour Name void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
239*5113495bSYour Name {
240*5113495bSYour Name 	struct dp_vdev *vdev;
241*5113495bSYour Name 	uint8_t vdev_id;
242*5113495bSYour Name 	uint32_t *htt_desc = (uint32_t *)status;
243*5113495bSYour Name 
244*5113495bSYour Name 	dp_assert_always_internal(soc->mec_fw_offload);
245*5113495bSYour Name 
246*5113495bSYour Name 	/*
247*5113495bSYour Name 	 * Get vdev id from HTT status word in case of MEC
248*5113495bSYour Name 	 * notification
249*5113495bSYour Name 	 */
250*5113495bSYour Name 	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
251*5113495bSYour Name 	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
252*5113495bSYour Name 		return;
253*5113495bSYour Name 
254*5113495bSYour Name 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
255*5113495bSYour Name 				     DP_MOD_ID_HTT_COMP);
256*5113495bSYour Name 	if (!vdev)
257*5113495bSYour Name 		return;
258*5113495bSYour Name 	dp_tx_mec_handler(vdev, status);
259*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
260*5113495bSYour Name }
261*5113495bSYour Name 
dp_tx_process_htt_completion_be(struct dp_soc * soc,struct dp_tx_desc_s * tx_desc,uint8_t * status,uint8_t ring_id)262*5113495bSYour Name void dp_tx_process_htt_completion_be(struct dp_soc *soc,
263*5113495bSYour Name 				     struct dp_tx_desc_s *tx_desc,
264*5113495bSYour Name 				     uint8_t *status,
265*5113495bSYour Name 				     uint8_t ring_id)
266*5113495bSYour Name {
267*5113495bSYour Name 	uint8_t tx_status;
268*5113495bSYour Name 	struct dp_pdev *pdev;
269*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
270*5113495bSYour Name 	struct hal_tx_completion_status ts = {0};
271*5113495bSYour Name 	uint32_t *htt_desc = (uint32_t *)status;
272*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
273*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
274*5113495bSYour Name 	struct cdp_tid_tx_stats *tid_stats = NULL;
275*5113495bSYour Name 	struct htt_soc *htt_handle;
276*5113495bSYour Name 	uint8_t vdev_id;
277*5113495bSYour Name 	uint16_t peer_id;
278*5113495bSYour Name 	uint8_t xmit_type;
279*5113495bSYour Name 
280*5113495bSYour Name 	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
281*5113495bSYour Name 	htt_handle = (struct htt_soc *)soc->htt_handle;
282*5113495bSYour Name 	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
283*5113495bSYour Name 
284*5113495bSYour Name 	/*
285*5113495bSYour Name 	 * There can be scenario where WBM consuming descriptor enqueued
286*5113495bSYour Name 	 * from TQM2WBM first and TQM completion can happen before MEC
287*5113495bSYour Name 	 * notification comes from FW2WBM. Avoid access any field of tx
288*5113495bSYour Name 	 * descriptor in case of MEC notify.
289*5113495bSYour Name 	 */
290*5113495bSYour Name 	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
291*5113495bSYour Name 		return dp_tx_process_mec_notify_be(soc, status);
292*5113495bSYour Name 
293*5113495bSYour Name 	/*
294*5113495bSYour Name 	 * If the descriptor is already freed in vdev_detach,
295*5113495bSYour Name 	 * continue to next descriptor
296*5113495bSYour Name 	 */
297*5113495bSYour Name 	if (qdf_unlikely(!tx_desc->flags)) {
298*5113495bSYour Name 		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
299*5113495bSYour Name 				   tx_desc->id);
300*5113495bSYour Name 		return;
301*5113495bSYour Name 	}
302*5113495bSYour Name 
303*5113495bSYour Name 	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
304*5113495bSYour Name 		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
305*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
306*5113495bSYour Name 		goto release_tx_desc;
307*5113495bSYour Name 	}
308*5113495bSYour Name 
309*5113495bSYour Name 	pdev = tx_desc->pdev;
310*5113495bSYour Name 	if (qdf_unlikely(!pdev)) {
311*5113495bSYour Name 		dp_tx_comp_warn("The pdev in TX desc is NULL, dropped.");
312*5113495bSYour Name 		dp_tx_comp_warn("tx_status: %u", tx_status);
313*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
314*5113495bSYour Name 		goto release_tx_desc;
315*5113495bSYour Name 	}
316*5113495bSYour Name 
317*5113495bSYour Name 	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
318*5113495bSYour Name 		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
319*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
320*5113495bSYour Name 		goto release_tx_desc;
321*5113495bSYour Name 	}
322*5113495bSYour Name 
323*5113495bSYour Name 	qdf_assert(tx_desc->pdev);
324*5113495bSYour Name 
325*5113495bSYour Name 	vdev_id = tx_desc->vdev_id;
326*5113495bSYour Name 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
327*5113495bSYour Name 				     DP_MOD_ID_HTT_COMP);
328*5113495bSYour Name 
329*5113495bSYour Name 	if (qdf_unlikely(!vdev)) {
330*5113495bSYour Name 		dp_tx_comp_info_rl("Unable to get vdev ref  %d", tx_desc->id);
331*5113495bSYour Name 		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
332*5113495bSYour Name 		goto release_tx_desc;
333*5113495bSYour Name 	}
334*5113495bSYour Name 
335*5113495bSYour Name 	switch (tx_status) {
336*5113495bSYour Name 	case HTT_TX_FW2WBM_TX_STATUS_OK:
337*5113495bSYour Name 	case HTT_TX_FW2WBM_TX_STATUS_DROP:
338*5113495bSYour Name 	case HTT_TX_FW2WBM_TX_STATUS_TTL:
339*5113495bSYour Name 	{
340*5113495bSYour Name 		uint8_t tid;
341*5113495bSYour Name 		uint8_t transmit_cnt_valid = 0;
342*5113495bSYour Name 
343*5113495bSYour Name 		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
344*5113495bSYour Name 			ts.peer_id =
345*5113495bSYour Name 				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
346*5113495bSYour Name 						htt_desc[3]);
347*5113495bSYour Name 			ts.tid =
348*5113495bSYour Name 				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
349*5113495bSYour Name 						htt_desc[3]);
350*5113495bSYour Name 		} else {
351*5113495bSYour Name 			ts.peer_id = HTT_INVALID_PEER;
352*5113495bSYour Name 			ts.tid = HTT_INVALID_TID;
353*5113495bSYour Name 		}
354*5113495bSYour Name 		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
355*5113495bSYour Name 		ts.ppdu_id =
356*5113495bSYour Name 			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
357*5113495bSYour Name 					htt_desc[2]);
358*5113495bSYour Name 		ts.ack_frame_rssi =
359*5113495bSYour Name 			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
360*5113495bSYour Name 					htt_desc[2]);
361*5113495bSYour Name 
362*5113495bSYour Name 		transmit_cnt_valid =
363*5113495bSYour Name 			DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(
364*5113495bSYour Name 					htt_desc[3]);
365*5113495bSYour Name 		if (transmit_cnt_valid)
366*5113495bSYour Name 			ts.transmit_cnt =
367*5113495bSYour Name 				HTT_TX_WBM_COMPLETION_V3_TRANSMIT_COUNT_GET(
368*5113495bSYour Name 						htt_desc[1]);
369*5113495bSYour Name 
370*5113495bSYour Name 		ts.tsf = htt_desc[4];
371*5113495bSYour Name 		ts.first_msdu = 1;
372*5113495bSYour Name 		ts.last_msdu = 1;
373*5113495bSYour Name 		switch (tx_status) {
374*5113495bSYour Name 		case HTT_TX_FW2WBM_TX_STATUS_OK:
375*5113495bSYour Name 			ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
376*5113495bSYour Name 			break;
377*5113495bSYour Name 		case HTT_TX_FW2WBM_TX_STATUS_DROP:
378*5113495bSYour Name 			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
379*5113495bSYour Name 			break;
380*5113495bSYour Name 		case HTT_TX_FW2WBM_TX_STATUS_TTL:
381*5113495bSYour Name 			ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
382*5113495bSYour Name 			break;
383*5113495bSYour Name 		}
384*5113495bSYour Name 		tid = ts.tid;
385*5113495bSYour Name 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
386*5113495bSYour Name 			tid = CDP_MAX_DATA_TIDS - 1;
387*5113495bSYour Name 
388*5113495bSYour Name 		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
389*5113495bSYour Name 
390*5113495bSYour Name 		if (qdf_unlikely(pdev->delay_stats_flag) ||
391*5113495bSYour Name 		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
392*5113495bSYour Name 			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
393*5113495bSYour Name 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
394*5113495bSYour Name 			tid_stats->htt_status_cnt[tx_status]++;
395*5113495bSYour Name 
396*5113495bSYour Name 		peer_id = dp_tx_comp_adjust_peer_id_be(soc, ts.peer_id);
397*5113495bSYour Name 		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
398*5113495bSYour Name 						       &txrx_ref_handle,
399*5113495bSYour Name 						       DP_MOD_ID_HTT_COMP);
400*5113495bSYour Name 		if (qdf_likely(txrx_peer))
401*5113495bSYour Name 			dp_tx_update_peer_basic_stats(
402*5113495bSYour Name 						txrx_peer,
403*5113495bSYour Name 						qdf_nbuf_len(tx_desc->nbuf),
404*5113495bSYour Name 						tx_status,
405*5113495bSYour Name 						pdev->enhanced_stats_en);
406*5113495bSYour Name 
407*5113495bSYour Name 		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
408*5113495bSYour Name 					     ring_id);
409*5113495bSYour Name 		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
410*5113495bSYour Name 		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
411*5113495bSYour Name 
412*5113495bSYour Name 		if (qdf_likely(txrx_peer))
413*5113495bSYour Name 			dp_txrx_peer_unref_delete(txrx_ref_handle,
414*5113495bSYour Name 						  DP_MOD_ID_HTT_COMP);
415*5113495bSYour Name 
416*5113495bSYour Name 		break;
417*5113495bSYour Name 	}
418*5113495bSYour Name 	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
419*5113495bSYour Name 	{
420*5113495bSYour Name 		uint8_t reinject_reason;
421*5113495bSYour Name 
422*5113495bSYour Name 		reinject_reason =
423*5113495bSYour Name 			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
424*5113495bSYour Name 								htt_desc[1]);
425*5113495bSYour Name 		dp_tx_reinject_handler(soc, vdev, tx_desc,
426*5113495bSYour Name 				       status, reinject_reason);
427*5113495bSYour Name 		break;
428*5113495bSYour Name 	}
429*5113495bSYour Name 	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
430*5113495bSYour Name 	{
431*5113495bSYour Name 		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
432*5113495bSYour Name 		break;
433*5113495bSYour Name 	}
434*5113495bSYour Name 	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
435*5113495bSYour Name 	{
436*5113495bSYour Name 		xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
437*5113495bSYour Name 		DP_STATS_INC(vdev,
438*5113495bSYour Name 			     tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
439*5113495bSYour Name 			     1);
440*5113495bSYour Name 		goto release_tx_desc;
441*5113495bSYour Name 	}
442*5113495bSYour Name 	default:
443*5113495bSYour Name 		dp_tx_comp_err("Invalid HTT tx_status %d\n",
444*5113495bSYour Name 			       tx_status);
445*5113495bSYour Name 		goto release_tx_desc;
446*5113495bSYour Name 	}
447*5113495bSYour Name 
448*5113495bSYour Name 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
449*5113495bSYour Name 	return;
450*5113495bSYour Name 
451*5113495bSYour Name release_tx_desc:
452*5113495bSYour Name 	dp_tx_comp_free_buf(soc, tx_desc, false);
453*5113495bSYour Name 	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
454*5113495bSYour Name 	if (vdev)
455*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
456*5113495bSYour Name }
457*5113495bSYour Name 
458*5113495bSYour Name #ifdef QCA_OL_TX_MULTIQ_SUPPORT
459*5113495bSYour Name #ifdef DP_TX_IMPLICIT_RBM_MAPPING
460*5113495bSYour Name /**
461*5113495bSYour Name  * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion.
462*5113495bSYour Name  * @soc: DP soc structure pointer
463*5113495bSYour Name  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
464*5113495bSYour Name  *
465*5113495bSYour Name  * Return: RBM ID corresponding to TCL ring_id
466*5113495bSYour Name  */
dp_tx_get_rbm_id_be(struct dp_soc * soc,uint8_t ring_id)467*5113495bSYour Name static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
468*5113495bSYour Name 					  uint8_t ring_id)
469*5113495bSYour Name {
470*5113495bSYour Name 	return 0;
471*5113495bSYour Name }
472*5113495bSYour Name #else
dp_tx_get_rbm_id_be(struct dp_soc * soc,uint8_t ring_id)473*5113495bSYour Name static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
474*5113495bSYour Name 					  uint8_t ring_id)
475*5113495bSYour Name {
476*5113495bSYour Name 	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
477*5113495bSYour Name 			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
478*5113495bSYour Name }
479*5113495bSYour Name #endif /*DP_TX_IMPLICIT_RBM_MAPPING*/
480*5113495bSYour Name #else
dp_tx_get_rbm_id_be(struct dp_soc * soc,uint8_t tcl_index)481*5113495bSYour Name static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
482*5113495bSYour Name 					  uint8_t tcl_index)
483*5113495bSYour Name {
484*5113495bSYour Name 	uint8_t rbm;
485*5113495bSYour Name 
486*5113495bSYour Name 	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
487*5113495bSYour Name 	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
488*5113495bSYour Name 	return rbm;
489*5113495bSYour Name }
490*5113495bSYour Name #endif
491*5113495bSYour Name 
492*5113495bSYour Name #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
493*5113495bSYour Name 
494*5113495bSYour Name /**
495*5113495bSYour Name  * dp_tx_set_min_rates_for_critical_frames()- sets min-rates for critical pkts
496*5113495bSYour Name  * @soc: DP soc structure pointer
497*5113495bSYour Name  * @hal_tx_desc: HAL descriptor where fields are set
498*5113495bSYour Name  * @nbuf: skb to be considered for min rates
499*5113495bSYour Name  *
500*5113495bSYour Name  * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
501*5113495bSYour Name  * and uses it to determine if the frame is critical. For a critical frame,
502*5113495bSYour Name  * flow override bits are set to classify the frame into HW's high priority
503*5113495bSYour Name  * queue. The HW will pick pre-configured min rates for such packets.
504*5113495bSYour Name  *
505*5113495bSYour Name  * Return: None
506*5113495bSYour Name  */
507*5113495bSYour Name static void
dp_tx_set_min_rates_for_critical_frames(struct dp_soc * soc,uint32_t * hal_tx_desc,qdf_nbuf_t nbuf)508*5113495bSYour Name dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
509*5113495bSYour Name 					uint32_t *hal_tx_desc,
510*5113495bSYour Name 					qdf_nbuf_t nbuf)
511*5113495bSYour Name {
512*5113495bSYour Name /*
513*5113495bSYour Name  * Critical frames should be queued to the high priority queue for the TID on
514*5113495bSYour Name  * on which they are sent out (for the concerned peer).
515*5113495bSYour Name  * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
516*5113495bSYour Name  * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
517*5113495bSYour Name  * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
518*5113495bSYour Name  * HOL queue.
519*5113495bSYour Name  */
520*5113495bSYour Name 	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
521*5113495bSYour Name 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
522*5113495bSYour Name 		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
523*5113495bSYour Name 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
524*5113495bSYour Name 		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
525*5113495bSYour Name 						TX_SEMI_HARD_NOTIFY_E);
526*5113495bSYour Name 	}
527*5113495bSYour Name }
528*5113495bSYour Name #else
529*5113495bSYour Name static inline void
dp_tx_set_min_rates_for_critical_frames(struct dp_soc * soc,uint32_t * hal_tx_desc_cached,qdf_nbuf_t nbuf)530*5113495bSYour Name dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
531*5113495bSYour Name 					uint32_t *hal_tx_desc_cached,
532*5113495bSYour Name 					qdf_nbuf_t nbuf)
533*5113495bSYour Name {
534*5113495bSYour Name }
535*5113495bSYour Name #endif
536*5113495bSYour Name 
537*5113495bSYour Name #ifdef DP_TX_PACKET_INSPECT_FOR_ILP
538*5113495bSYour Name /**
539*5113495bSYour Name  * dp_tx_set_particular_tx_queue() - set particular TX TQM flow queue 3 for
540*5113495bSYour Name  *				     TX packets, currently TCP ACK only
541*5113495bSYour Name  * @soc: DP soc structure pointer
542*5113495bSYour Name  * @hal_tx_desc: HAL descriptor where fields are set
543*5113495bSYour Name  * @nbuf: skb to be considered for particular TX queue
544*5113495bSYour Name  *
545*5113495bSYour Name  * Return: None
546*5113495bSYour Name  */
547*5113495bSYour Name static inline
dp_tx_set_particular_tx_queue(struct dp_soc * soc,uint32_t * hal_tx_desc,qdf_nbuf_t nbuf)548*5113495bSYour Name void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
549*5113495bSYour Name 				   uint32_t *hal_tx_desc,
550*5113495bSYour Name 				   qdf_nbuf_t nbuf)
551*5113495bSYour Name {
552*5113495bSYour Name 	if (!soc->tx_ilp_enable)
553*5113495bSYour Name 		return;
554*5113495bSYour Name 
555*5113495bSYour Name 	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
556*5113495bSYour Name 			 QDF_NBUF_CB_PACKET_TYPE_TCP_ACK)) {
557*5113495bSYour Name 		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
558*5113495bSYour Name 		hal_tx_desc_set_flow_override(hal_tx_desc, 1);
559*5113495bSYour Name 		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
560*5113495bSYour Name 	}
561*5113495bSYour Name }
562*5113495bSYour Name #else
563*5113495bSYour Name static inline
dp_tx_set_particular_tx_queue(struct dp_soc * soc,uint32_t * hal_tx_desc,qdf_nbuf_t nbuf)564*5113495bSYour Name void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
565*5113495bSYour Name 				   uint32_t *hal_tx_desc,
566*5113495bSYour Name 				   qdf_nbuf_t nbuf)
567*5113495bSYour Name {
568*5113495bSYour Name }
569*5113495bSYour Name #endif
570*5113495bSYour Name 
571*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
572*5113495bSYour Name 	defined(WLAN_MCAST_MLO)
573*5113495bSYour Name #ifdef QCA_MULTIPASS_SUPPORT
574*5113495bSYour Name /**
575*5113495bSYour Name  * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
576*5113495bSYour Name  * @be_vdev: Handle to DP be_vdev structure
577*5113495bSYour Name  * @ptnr_vdev: DP ptnr_vdev handle
578*5113495bSYour Name  * @arg: pointer to dp_mlo_mpass_ buf
579*5113495bSYour Name  *
580*5113495bSYour Name  * Return: None
581*5113495bSYour Name  */
582*5113495bSYour Name static void
dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be * be_vdev,struct dp_vdev * ptnr_vdev,void * arg)583*5113495bSYour Name dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
584*5113495bSYour Name 				 struct dp_vdev *ptnr_vdev,
585*5113495bSYour Name 				 void *arg)
586*5113495bSYour Name {
587*5113495bSYour Name 	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
588*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
589*5113495bSYour Name 	struct vlan_ethhdr *veh = NULL;
590*5113495bSYour Name 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
591*5113495bSYour Name 	uint16_t vlan_id = 0;
592*5113495bSYour Name 	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
593*5113495bSYour Name 			(htons(eh->ether_type) != ETH_P_8021Q));
594*5113495bSYour Name 
595*5113495bSYour Name 	if (qdf_unlikely(not_vlan))
596*5113495bSYour Name 		return;
597*5113495bSYour Name 	veh = (struct vlan_ethhdr *)eh;
598*5113495bSYour Name 	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);
599*5113495bSYour Name 
600*5113495bSYour Name 	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
601*5113495bSYour Name 	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
602*5113495bSYour Name 		      mpass_peer_list_elem) {
603*5113495bSYour Name 		if (vlan_id == txrx_peer->vlan_id) {
604*5113495bSYour Name 			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
605*5113495bSYour Name 			ptr->vlan_id = vlan_id;
606*5113495bSYour Name 			return;
607*5113495bSYour Name 		}
608*5113495bSYour Name 	}
609*5113495bSYour Name 	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
610*5113495bSYour Name }
611*5113495bSYour Name 
/**
 * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
 * @be_vdev: Handle to DP be_vdev structure (originating vdev)
 * @ptnr_vdev: DP ptnr_vdev handle (link vdev the frame is sent on)
 * @arg: pointer to dp_mlo_mpass_buf carrying the nbuf and looked-up vlan_id
 *
 * Return: None
 */
static void
dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
			       struct dp_vdev *ptnr_vdev,
			       void *arg)
{
	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev_be *be_ptnr_vdev = NULL;
	qdf_nbuf_t  nbuf_clone;
	uint16_t group_key = 0;

	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
	/* Partner links transmit a clone; the original nbuf is consumed
	 * only on the self invocation (be_vdev == be_ptnr_vdev).
	 */
	if (be_vdev != be_ptnr_vdev) {
		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
		if (qdf_unlikely(!nbuf_clone)) {
			dp_tx_debug("nbuf clone failed");
			return;
		}
	} else {
		nbuf_clone = ptr->nbuf;
	}
	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
	/* All links of this mcast share the MLO global sequence number */
	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
	msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(ptr->nbuf);


	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
		/* Tag-intact copy for special peers: mark key flags valid
		 * and leave the TID invalid.
		 */
		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
						msdu_info.meta_data[0], 1);
	} else {
		/* return when vlan map is not initialized */
		if (!ptnr_vdev->iv_vlan_map)
			goto nbuf_free;
		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];

		/*
		 * If group key is not installed, drop the frame.
		 */

		if (!group_key)
			goto nbuf_free;

		/* Classic peer: strip the VLAN tag and send with the
		 * group key metadata via the FW exception path.
		 */
		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
		msdu_info.exception_fw = 1;
	}

	/* On successful enqueue the nbuf is consumed and NULL returned */
	nbuf_clone = dp_tx_send_msdu_single(
					ptnr_vdev,
					nbuf_clone,
					&msdu_info,
					DP_MLO_MCAST_REINJECT_PEER_ID,
					NULL);

nbuf_free:
	/* Reached with a non-NULL nbuf_clone either on enqueue failure
	 * or via the early-drop gotos above; free it in both cases.
	 */
	if (qdf_unlikely(nbuf_clone)) {
		dp_info("pkt send failed");
		qdf_nbuf_free(nbuf_clone);
		return;
	}
}
683*5113495bSYour Name 
/**
 * dp_tx_mlo_mcast_multipass_handler - If frame needs multipass processing
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true if handling is done else false
 */
static bool
dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  qdf_nbuf_t nbuf)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_mlo_mpass_buf mpass_buf;

	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
	mpass_buf.vlan_id = INVALID_VLAN_ID;
	mpass_buf.nbuf = nbuf;

	/* Look up the frame's VLAN against this vdev's multipass peers
	 * first, then against all partner vdevs if no local match.
	 */
	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_tx_mlo_mcast_multipass_lookup,
				      &mpass_buf, DP_MOD_ID_TX,
				      DP_ALL_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);
		/*
		 * Do not drop the frame when vlan_id doesn't match.
		 * Send the frame as it is.
		 */
		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
			return false;
	}

	/* AP can have classic clients, special clients &
	 * classic repeaters.
	 * 1. Classic clients & special client:
	 *	Remove vlan header, find corresponding group key
	 *	index, fill in metaheader and enqueue multicast
	 *	frame to TCL.
	 * 2. Classic repeater:
	 *	Pass through to classic repeater with vlan tag
	 *	intact without any group key index. Hardware
	 *	will know which key to use to send frame to
	 *	repeater.
	 */
	nbuf_copy = qdf_nbuf_copy(nbuf);

	/*
	 * Send multicast frame to special peers even
	 * if pass through to classic repeater fails.
	 */
	if (nbuf_copy) {
		struct dp_mlo_mpass_buf mpass_buf_copy = {0};

		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
		mpass_buf_copy.nbuf = nbuf_copy;
		/* send frame on partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_tx_mlo_mcast_multipass_send,
				      &mpass_buf_copy, DP_MOD_ID_TX,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);

		/* send frame on mcast primary vdev */
		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);

		/* The tag-intact copy consumed one MLO global sequence
		 * number; advance it, wrapping past MAX_GSN_NUM.
		 */
		if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
			be_vdev->mlo_dev_ctxt->seq_num = 0;
		else
			be_vdev->mlo_dev_ctxt->seq_num++;
	}

	/* Group-key (tag-stripped) copy: partner vdevs first, then self */
	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_tx_mlo_mcast_multipass_send,
			      &mpass_buf, DP_MOD_ID_TX, DP_LINK_VDEV_ITER,
			      DP_VDEV_ITERATE_SKIP_SELF);
	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);

	/* Second sequence number consumed by the group-key copy */
	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
		be_vdev->mlo_dev_ctxt->seq_num = 0;
	else
		be_vdev->mlo_dev_ctxt->seq_num++;

	return true;
}
773*5113495bSYour Name #else
/* Stub when QCA_MULTIPASS_SUPPORT is disabled: report "not handled" so
 * the caller proceeds with the regular MLO mcast send path.
 */
static bool
dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
				  qdf_nbuf_t nbuf)
{
	return false;
}
780*5113495bSYour Name #endif
781*5113495bSYour Name 
/*
 * dp_tx_mlo_mcast_pkt_send() - transmit an MLO mcast frame on one link vdev
 * @be_vdev: BE vdev of the originating link
 * @ptnr_vdev: vdev the frame is transmitted on
 * @arg: the original mcast nbuf (passed as void * by the vdev iterator)
 *
 * Partner links transmit a clone; the original nbuf is consumed only on
 * the self invocation (be_vdev == be_ptnr_vdev).
 */
void
dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
			 struct dp_vdev *ptnr_vdev,
			 void *arg)
{
	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;
	qdf_nbuf_t  nbuf_clone;
	struct dp_vdev_be *be_ptnr_vdev = NULL;
	struct dp_tx_msdu_info_s msdu_info;

	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
	if (be_vdev != be_ptnr_vdev) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(!nbuf_clone)) {
			dp_tx_debug("nbuf clone failed");
			return;
		}
	} else {
		nbuf_clone = nbuf;
	}

	/* NAWDS clients will accepts on 4 addr format MCAST packets
	 * This will ensure to send packets in 4 addr format to NAWDS clients.
	 */
	if (qdf_unlikely(ptnr_vdev->nawds_enabled)) {
		qdf_mem_zero(&msdu_info, sizeof(msdu_info));
		dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
		dp_tx_nawds_handler(ptnr_vdev->pdev->soc, ptnr_vdev,
				    &msdu_info, nbuf_clone, DP_INVALID_PEER);
	}

	/* Drop the frame if proxy ARP handling rejects it */
	if (qdf_unlikely(dp_tx_proxy_arp(ptnr_vdev, nbuf_clone) !=
			 QDF_STATUS_SUCCESS)) {
		qdf_nbuf_free(nbuf_clone);
		return;
	}

	/* msdu_info is re-initialized: the NAWDS block above may have
	 * populated it for a different purpose.
	 */
	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);

	/* All links of this mcast share the MLO global sequence number */
	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
	msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf_clone);

	DP_STATS_INC(ptnr_vdev,
		     tx_i[msdu_info.xmit_type].mlo_mcast.send_pkt_count, 1);
	/* On successful enqueue the nbuf is consumed and NULL returned */
	nbuf_clone = dp_tx_send_msdu_single(
					ptnr_vdev,
					nbuf_clone,
					&msdu_info,
					DP_MLO_MCAST_REINJECT_PEER_ID,
					NULL);
	if (qdf_unlikely(nbuf_clone)) {
		DP_STATS_INC(ptnr_vdev,
			     tx_i[msdu_info.xmit_type].mlo_mcast.fail_pkt_count,
			     1);
		dp_info("pkt send failed");
		qdf_nbuf_free(nbuf_clone);
		return;
	}
}
842*5113495bSYour Name 
/* MLO build: program the TCL descriptor with the vdev_id carried in
 * msdu_info. NOTE(review): presumably msdu_info->vdev_id can differ from
 * vdev->vdev_id on MLO reinjection paths — confirm against callers.
 */
static inline void
dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
			      struct dp_vdev *vdev,
			      struct dp_tx_msdu_info_s *msdu_info)
{
	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
}
850*5113495bSYour Name 
/*
 * dp_tx_mlo_mcast_handler_be() - replicate an MLO mcast frame to all links
 * @soc: DP soc handle
 * @vdev: originating DP vdev
 * @nbuf: mcast frame to be sent
 *
 * Hands the frame to the multipass handler first when multipass is
 * enabled; otherwise sends it on every partner link vdev and then on
 * the originating vdev itself.
 */
void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
				struct dp_vdev *vdev,
				qdf_nbuf_t nbuf)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	/* The multipass handler consumes the frame when it returns true */
	if (qdf_unlikely(vdev->multipass_en) &&
	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
		return;
	/* send frame on partner vdevs */
	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_tx_mlo_mcast_pkt_send,
			      nbuf, DP_MOD_ID_REINJECT, DP_LINK_VDEV_ITER,
			      DP_VDEV_ITERATE_SKIP_SELF);

	/* send frame on mcast primary vdev */
	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);

	/* Advance the shared MLO global sequence number, wrapping past
	 * MAX_GSN_NUM; all links of this mcast used the same gsn.
	 */
	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
		be_vdev->mlo_dev_ctxt->seq_num = 0;
	else
		be_vdev->mlo_dev_ctxt->seq_num++;
}
875*5113495bSYour Name 
dp_tx_mlo_is_mcast_primary_be(struct dp_soc * soc,struct dp_vdev * vdev)876*5113495bSYour Name bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
877*5113495bSYour Name 				   struct dp_vdev *vdev)
878*5113495bSYour Name {
879*5113495bSYour Name 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
880*5113495bSYour Name 
881*5113495bSYour Name 	if (be_vdev->mcast_primary)
882*5113495bSYour Name 		return true;
883*5113495bSYour Name 
884*5113495bSYour Name 	return false;
885*5113495bSYour Name }
886*5113495bSYour Name 
887*5113495bSYour Name #if defined(CONFIG_MLO_SINGLE_DEV)
/*
 * dp_tx_mlo_mcast_enhance_be() - run mcast enhancement on a partner vdev
 * @be_vdev: originating BE vdev (cast back to struct dp_vdev below)
 * @ptnr_vdev: partner vdev to run mcast enhancement on
 * @arg: the mcast nbuf (passed as void * by the vdev iterator)
 *
 * NOTE(review): the cast below assumes struct dp_vdev is the first
 * member of struct dp_vdev_be — confirm the struct layout.
 */
static void
dp_tx_mlo_mcast_enhance_be(struct dp_vdev_be *be_vdev,
			   struct dp_vdev *ptnr_vdev,
			   void *arg)
{
	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;
	qdf_nbuf_t  nbuf = (qdf_nbuf_t)arg;

	/* Skip the originating vdev; it is handled by the caller */
	if (vdev == ptnr_vdev)
		return;

	/*
	 * Hold the reference to avoid free of nbuf in
	 * dp_tx_mcast_enhance() in case of successful
	 * conversion
	 */
	qdf_nbuf_ref(nbuf);

	/* A false return means dp_tx_mcast_enhance() consumed the held
	 * reference (successful conversion) — nothing left to release.
	 */
	if (qdf_unlikely(!dp_tx_mcast_enhance(ptnr_vdev, nbuf)))
		return;

	/* Conversion did not consume the nbuf: drop the reference taken above */
	qdf_nbuf_free(nbuf);
}
911*5113495bSYour Name 
/*
 * dp_tx_mlo_mcast_send_be() - MLO mcast send entry for single-netdev model
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @nbuf: frame to transmit
 * @tx_exc_metadata: exception-path metadata (is_mlo_mcast flag consulted)
 *
 * Return: the original nbuf when it is not an MLO mcast frame (caller
 * continues normal transmit); NULL when the frame has been consumed
 * (sent or dropped) here.
 */
qdf_nbuf_t
dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
			qdf_nbuf_t nbuf,
			struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	/* Not an MLO mcast frame: let the regular TX path handle it */
	if (!tx_exc_metadata->is_mlo_mcast)
		return nbuf;

	/* Only the mcast primary vdev replicates; drop on other links */
	if (!be_vdev->mcast_primary) {
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	/*
	 * In the single netdev model avoid reinjection path as mcast
	 * packet is identified in upper layers while peer search to find
	 * primary TQM based on dest mac addr
	 *
	 * New bonding interface added into the bridge so MCSD will update
	 * snooping table and wifi driver populates the entries in appropriate
	 * child net devices.
	 */
	if (vdev->mcast_enhancement_en) {
		/*
		 * As dp_tx_mcast_enhance() can consume the nbuf incase of
		 * successful conversion hold the reference of nbuf.
		 *
		 * Hold the reference to tx on partner links
		 */
		qdf_nbuf_ref(nbuf);
		if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) {
			/* Conversion succeeded on self; run enhancement on
			 * all partner vdevs, then drop the reference held
			 * above for the partner-link pass.
			 */
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_tx_mlo_mcast_enhance_be,
					      nbuf, DP_MOD_ID_TX,
					      DP_ALL_VDEV_ITER,
					      DP_VDEV_ITERATE_SKIP_SELF);
			qdf_nbuf_free(nbuf);
			return NULL;
		}
		/* release reference taken above */
		qdf_nbuf_free(nbuf);
	}
	/* Replicate the frame on all links; consumes the nbuf */
	dp_tx_mlo_mcast_handler_be(soc, vdev, nbuf);
	return NULL;
}
960*5113495bSYour Name #endif
961*5113495bSYour Name #else
/* Non-MLO build: program the TCL descriptor directly with the
 * transmitting vdev's id.
 */
static inline void
dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
			      struct dp_vdev *vdev,
			      struct dp_tx_msdu_info_s *msdu_info)
{
	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
}
969*5113495bSYour Name #endif
970*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
971*5113495bSYour Name 	!defined(WLAN_MCAST_MLO)
/* Stub for builds without MLO multi-chip mcast support: no MLO
 * replication is performed.
 */
void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
				struct dp_vdev *vdev,
				qdf_nbuf_t nbuf)
{
}
977*5113495bSYour Name 
/* Stub for builds without MLO multi-chip mcast support: no vdev is ever
 * the mcast primary.
 */
bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
				   struct dp_vdev *vdev)
{
	return false;
}
983*5113495bSYour Name #endif
984*5113495bSYour Name 
985*5113495bSYour Name #ifdef CONFIG_SAWF
986*5113495bSYour Name /**
987*5113495bSYour Name  * dp_sawf_config_be - Configure sawf specific fields in tcl
988*5113495bSYour Name  *
989*5113495bSYour Name  * @soc: DP soc handle
990*5113495bSYour Name  * @hal_tx_desc_cached: tx descriptor
991*5113495bSYour Name  * @fw_metadata: firmware metadata
992*5113495bSYour Name  * @nbuf: skb buffer
993*5113495bSYour Name  * @msdu_info: msdu info
994*5113495bSYour Name  *
995*5113495bSYour Name  * Return: tid value in mark metadata
996*5113495bSYour Name  */
dp_sawf_config_be(struct dp_soc * soc,uint32_t * hal_tx_desc_cached,uint16_t * fw_metadata,qdf_nbuf_t nbuf,struct dp_tx_msdu_info_s * msdu_info)997*5113495bSYour Name uint8_t dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
998*5113495bSYour Name 			  uint16_t *fw_metadata, qdf_nbuf_t nbuf,
999*5113495bSYour Name 			  struct dp_tx_msdu_info_s *msdu_info)
1000*5113495bSYour Name {
1001*5113495bSYour Name 	uint8_t q_id = 0;
1002*5113495bSYour Name 	uint8_t tid = HTT_TX_EXT_TID_INVALID;
1003*5113495bSYour Name 
1004*5113495bSYour Name 	q_id = dp_sawf_queue_id_get(nbuf);
1005*5113495bSYour Name 
1006*5113495bSYour Name 	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
1007*5113495bSYour Name 		return HTT_TX_EXT_TID_INVALID;
1008*5113495bSYour Name 
1009*5113495bSYour Name 	tid = (q_id & (CDP_DATA_TID_MAX - 1));
1010*5113495bSYour Name 	if (msdu_info)
1011*5113495bSYour Name 		msdu_info->tid = tid;
1012*5113495bSYour Name 
1013*5113495bSYour Name 	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
1014*5113495bSYour Name 				 (q_id & (CDP_DATA_TID_MAX - 1)));
1015*5113495bSYour Name 
1016*5113495bSYour Name 	if ((q_id >= DP_SAWF_DEFAULT_QUEUE_MIN) &&
1017*5113495bSYour Name 	    (q_id < DP_SAWF_DEFAULT_QUEUE_MAX))
1018*5113495bSYour Name 		return tid;
1019*5113495bSYour Name 
1020*5113495bSYour Name 	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
1021*5113495bSYour Name 		return tid;
1022*5113495bSYour Name 
1023*5113495bSYour Name 	if (fw_metadata)
1024*5113495bSYour Name 		dp_sawf_tcl_cmd(fw_metadata, nbuf);
1025*5113495bSYour Name 	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
1026*5113495bSYour Name 					     DP_TX_FLOW_OVERRIDE_ENABLE);
1027*5113495bSYour Name 	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
1028*5113495bSYour Name 				      DP_TX_FLOW_OVERRIDE_GET(q_id));
1029*5113495bSYour Name 	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
1030*5113495bSYour Name 					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
1031*5113495bSYour Name 
1032*5113495bSYour Name 	return tid;
1033*5113495bSYour Name }
1034*5113495bSYour Name 
1035*5113495bSYour Name #else
1036*5113495bSYour Name 
/* Stub when CONFIG_SAWF is disabled: no SAWF queue mapping exists, so
 * report an invalid TID and leave the TCL descriptor untouched.
 */
static inline
uint8_t dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
			  uint16_t *fw_metadata, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	return HTT_TX_EXT_TID_INVALID;
}
1044*5113495bSYour Name 
/* Stub when CONFIG_SAWF is disabled: SAWF enqueue stats are not
 * collected; always succeed.
 */
static inline
QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
					 struct dp_tx_desc_s *tx_desc)
{
	return QDF_STATUS_SUCCESS;
}
1051*5113495bSYour Name 
/* Stub when CONFIG_SAWF is disabled: SAWF enqueue-failure stats are not
 * collected; always succeed.
 */
static inline
QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
					      struct dp_tx_desc_s *tx_desc)
{
	return QDF_STATUS_SUCCESS;
}
1058*5113495bSYour Name #endif
1059*5113495bSYour Name 
1060*5113495bSYour Name #ifdef WLAN_SUPPORT_PPEDS
1061*5113495bSYour Name 
1062*5113495bSYour Name /**
1063*5113495bSYour Name  * dp_ppeds_stats() - Accounting fw2wbm_tx_drop drops in Tx path
1064*5113495bSYour Name  * @soc: Handle to DP Soc structure
1065*5113495bSYour Name  * @peer_id: Peer ID in the descriptor
1066*5113495bSYour Name  *
1067*5113495bSYour Name  * Return: NONE
1068*5113495bSYour Name  */
1069*5113495bSYour Name static inline
dp_ppeds_stats(struct dp_soc * soc,uint16_t peer_id)1070*5113495bSYour Name void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id)
1071*5113495bSYour Name {
1072*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
1073*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
1074*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1075*5113495bSYour Name 
1076*5113495bSYour Name 	DP_STATS_INC(soc, tx.fw2wbm_tx_drop, 1);
1077*5113495bSYour Name 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc,
1078*5113495bSYour Name 					       peer_id,
1079*5113495bSYour Name 					       &txrx_ref_handle,
1080*5113495bSYour Name 					       DP_MOD_ID_TX_COMP);
1081*5113495bSYour Name 	if (txrx_peer) {
1082*5113495bSYour Name 		vdev = txrx_peer->vdev;
1083*5113495bSYour Name 		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.fw2wbm_tx_drop, 1);
1084*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
1085*5113495bSYour Name 	}
1086*5113495bSYour Name }
1087*5113495bSYour Name 
dp_ppeds_tx_comp_handler(struct dp_soc_be * be_soc,uint32_t quota)1088*5113495bSYour Name int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
1089*5113495bSYour Name {
1090*5113495bSYour Name 	uint32_t num_avail_for_reap = 0;
1091*5113495bSYour Name 	void *tx_comp_hal_desc;
1092*5113495bSYour Name 	uint8_t buf_src, status = 0;
1093*5113495bSYour Name 	uint32_t count = 0;
1094*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc = NULL;
1095*5113495bSYour Name 	struct dp_tx_desc_s *head_desc = NULL;
1096*5113495bSYour Name 	struct dp_tx_desc_s *tail_desc = NULL;
1097*5113495bSYour Name 	struct dp_soc *soc = &be_soc->soc;
1098*5113495bSYour Name 	void *last_prefetch_hw_desc = NULL;
1099*5113495bSYour Name 	struct dp_tx_desc_s *last_prefetch_sw_desc = NULL;
1100*5113495bSYour Name 	qdf_nbuf_t  nbuf;
1101*5113495bSYour Name 	hal_soc_handle_t hal_soc = soc->hal_soc;
1102*5113495bSYour Name 	hal_ring_handle_t hal_ring_hdl =
1103*5113495bSYour Name 				be_soc->ppeds_wbm_release_ring.hal_srng;
1104*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
1105*5113495bSYour Name 	uint16_t peer_id = CDP_INVALID_PEER;
1106*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1107*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
1108*5113495bSYour Name 	struct dp_pdev *pdev = NULL;
1109*5113495bSYour Name 	struct dp_srng *srng;
1110*5113495bSYour Name 
1111*5113495bSYour Name 	if (qdf_unlikely(dp_srng_access_start(NULL, soc, hal_ring_hdl))) {
1112*5113495bSYour Name 		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
1113*5113495bSYour Name 		return 0;
1114*5113495bSYour Name 	}
1115*5113495bSYour Name 
1116*5113495bSYour Name 	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
1117*5113495bSYour Name 
1118*5113495bSYour Name 	if (num_avail_for_reap >= quota)
1119*5113495bSYour Name 		num_avail_for_reap = quota;
1120*5113495bSYour Name 
1121*5113495bSYour Name 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
1122*5113495bSYour Name 
1123*5113495bSYour Name 	last_prefetch_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
1124*5113495bSYour Name 						     num_avail_for_reap);
1125*5113495bSYour Name 
1126*5113495bSYour Name 	srng = &be_soc->ppeds_wbm_release_ring;
1127*5113495bSYour Name 
1128*5113495bSYour Name 	if (srng) {
1129*5113495bSYour Name 		hal_update_ring_util(soc->hal_soc, srng->hal_srng,
1130*5113495bSYour Name 				     WBM2SW_RELEASE,
1131*5113495bSYour Name 				     &be_soc->ppeds_wbm_release_ring.stats);
1132*5113495bSYour Name 	}
1133*5113495bSYour Name 
1134*5113495bSYour Name 	while (qdf_likely(num_avail_for_reap--)) {
1135*5113495bSYour Name 		tx_comp_hal_desc =  dp_srng_dst_get_next(soc, hal_ring_hdl);
1136*5113495bSYour Name 		if (qdf_unlikely(!tx_comp_hal_desc))
1137*5113495bSYour Name 			break;
1138*5113495bSYour Name 
1139*5113495bSYour Name 		buf_src = hal_tx_comp_get_buffer_source(hal_soc,
1140*5113495bSYour Name 							tx_comp_hal_desc);
1141*5113495bSYour Name 
1142*5113495bSYour Name 		if (qdf_unlikely(buf_src != HAL_TX_COMP_RELEASE_SOURCE_TQM &&
1143*5113495bSYour Name 				 buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1144*5113495bSYour Name 			dp_err("Tx comp release_src != TQM | FW but from %d",
1145*5113495bSYour Name 			       buf_src);
1146*5113495bSYour Name 			dp_assert_always_internal_ds_stat(0, be_soc,
1147*5113495bSYour Name 							  tx.tx_comp_buf_src);
1148*5113495bSYour Name 			continue;
1149*5113495bSYour Name 		}
1150*5113495bSYour Name 
1151*5113495bSYour Name 		dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
1152*5113495bSYour Name 						       &tx_desc);
1153*5113495bSYour Name 
1154*5113495bSYour Name 		if (!tx_desc) {
1155*5113495bSYour Name 			dp_err("unable to retrieve tx_desc!");
1156*5113495bSYour Name 			dp_assert_always_internal_ds_stat(0, be_soc,
1157*5113495bSYour Name 							  tx.tx_comp_desc_null);
1158*5113495bSYour Name 			continue;
1159*5113495bSYour Name 		}
1160*5113495bSYour Name 
1161*5113495bSYour Name 		if (qdf_unlikely(!(tx_desc->flags &
1162*5113495bSYour Name 				   DP_TX_DESC_FLAG_ALLOCATED) ||
1163*5113495bSYour Name 				 !(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
1164*5113495bSYour Name 			dp_assert_always_internal_ds_stat(0, be_soc,
1165*5113495bSYour Name 						tx.tx_comp_invalid_flag);
1166*5113495bSYour Name 			continue;
1167*5113495bSYour Name 		}
1168*5113495bSYour Name 
1169*5113495bSYour Name 		tx_desc->buffer_src = buf_src;
1170*5113495bSYour Name 
1171*5113495bSYour Name 		if (qdf_unlikely(buf_src == HAL_TX_COMP_RELEASE_SOURCE_FW)) {
1172*5113495bSYour Name 			status = hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1173*5113495bSYour Name 			if (status != HTT_TX_FW2WBM_TX_STATUS_OK)
1174*5113495bSYour Name 				dp_ppeds_stats(soc, tx_desc->peer_id);
1175*5113495bSYour Name 
1176*5113495bSYour Name 			nbuf = dp_ppeds_tx_desc_free(soc, tx_desc);
1177*5113495bSYour Name 			qdf_nbuf_free(nbuf);
1178*5113495bSYour Name 		} else {
1179*5113495bSYour Name 			tx_desc->tx_status =
1180*5113495bSYour Name 				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
1181*5113495bSYour Name 
1182*5113495bSYour Name 			/*
1183*5113495bSYour Name 			 * Add desc sync to account for extended statistics
1184*5113495bSYour Name 			 * during Tx completion.
1185*5113495bSYour Name 			 */
1186*5113495bSYour Name 			if (peer_id != tx_desc->peer_id) {
1187*5113495bSYour Name 				if (txrx_peer) {
1188*5113495bSYour Name 					dp_txrx_peer_unref_delete(txrx_ref_handle,
1189*5113495bSYour Name 								  DP_MOD_ID_TX_COMP);
1190*5113495bSYour Name 					txrx_peer = NULL;
1191*5113495bSYour Name 					vdev = NULL;
1192*5113495bSYour Name 					pdev = NULL;
1193*5113495bSYour Name 				}
1194*5113495bSYour Name 				peer_id = tx_desc->peer_id;
1195*5113495bSYour Name 				txrx_peer =
1196*5113495bSYour Name 					dp_txrx_peer_get_ref_by_id(soc, peer_id,
1197*5113495bSYour Name 								   &txrx_ref_handle,
1198*5113495bSYour Name 								   DP_MOD_ID_TX_COMP);
1199*5113495bSYour Name 				if (txrx_peer) {
1200*5113495bSYour Name 					vdev = txrx_peer->vdev;
1201*5113495bSYour Name 					if (!vdev)
1202*5113495bSYour Name 						goto next_desc;
1203*5113495bSYour Name 
1204*5113495bSYour Name 					pdev = vdev->pdev;
1205*5113495bSYour Name 					if (!pdev)
1206*5113495bSYour Name 						goto next_desc;
1207*5113495bSYour Name 
1208*5113495bSYour Name 					dp_tx_desc_update_fast_comp_flag(soc,
1209*5113495bSYour Name 									 tx_desc,
1210*5113495bSYour Name 									 !pdev->enhanced_stats_en);
1211*5113495bSYour Name 					if (pdev->enhanced_stats_en) {
1212*5113495bSYour Name 						hal_tx_comp_desc_sync(tx_comp_hal_desc,
1213*5113495bSYour Name 								      &tx_desc->comp, 1);
1214*5113495bSYour Name 					}
1215*5113495bSYour Name 				}
1216*5113495bSYour Name 			} else if (txrx_peer && vdev && pdev) {
1217*5113495bSYour Name 				dp_tx_desc_update_fast_comp_flag(soc,
1218*5113495bSYour Name 								 tx_desc,
1219*5113495bSYour Name 								 !pdev->enhanced_stats_en);
1220*5113495bSYour Name 				if (pdev->enhanced_stats_en) {
1221*5113495bSYour Name 					hal_tx_comp_desc_sync(tx_comp_hal_desc,
1222*5113495bSYour Name 							      &tx_desc->comp, 1);
1223*5113495bSYour Name 				}
1224*5113495bSYour Name 			}
1225*5113495bSYour Name next_desc:
1226*5113495bSYour Name 			if (!head_desc) {
1227*5113495bSYour Name 				head_desc = tx_desc;
1228*5113495bSYour Name 				tail_desc = tx_desc;
1229*5113495bSYour Name 			}
1230*5113495bSYour Name 
1231*5113495bSYour Name 			tail_desc->next = tx_desc;
1232*5113495bSYour Name 			tx_desc->next = NULL;
1233*5113495bSYour Name 			tail_desc = tx_desc;
1234*5113495bSYour Name 
1235*5113495bSYour Name 			count++;
1236*5113495bSYour Name 
1237*5113495bSYour Name 			dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
1238*5113495bSYour Name 						       num_avail_for_reap,
1239*5113495bSYour Name 						       hal_ring_hdl,
1240*5113495bSYour Name 						       &last_prefetch_hw_desc,
1241*5113495bSYour Name 						       &last_prefetch_sw_desc,
1242*5113495bSYour Name 						       NULL);
1243*5113495bSYour Name 		}
1244*5113495bSYour Name 	}
1245*5113495bSYour Name 
1246*5113495bSYour Name 	dp_srng_access_end(NULL, soc, hal_ring_hdl);
1247*5113495bSYour Name 
1248*5113495bSYour Name 	if (txrx_peer)
1249*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle,
1250*5113495bSYour Name 					  DP_MOD_ID_TX_COMP);
1251*5113495bSYour Name 	if (head_desc)
1252*5113495bSYour Name 		dp_tx_comp_process_desc_list(soc, head_desc,
1253*5113495bSYour Name 					     CDP_MAX_TX_COMP_PPE_RING);
1254*5113495bSYour Name 
1255*5113495bSYour Name 	return count;
1256*5113495bSYour Name }
1257*5113495bSYour Name #endif
1258*5113495bSYour Name 
1259*5113495bSYour Name #if defined(QCA_SUPPORT_WDS_EXTENDED)
1260*5113495bSYour Name static inline void
dp_get_peer_from_tx_exc_meta(struct dp_soc * soc,uint32_t * hal_tx_desc_cached,struct cdp_tx_exception_metadata * tx_exc_metadata,uint16_t * ast_idx,uint16_t * ast_hash)1261*5113495bSYour Name dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1262*5113495bSYour Name 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1263*5113495bSYour Name 			     uint16_t *ast_idx, uint16_t *ast_hash)
1264*5113495bSYour Name {
1265*5113495bSYour Name 	struct dp_peer *peer = NULL;
1266*5113495bSYour Name 
1267*5113495bSYour Name 	if (tx_exc_metadata->is_wds_extended) {
1268*5113495bSYour Name 		peer = dp_peer_get_ref_by_id(soc, tx_exc_metadata->peer_id,
1269*5113495bSYour Name 					     DP_MOD_ID_TX);
1270*5113495bSYour Name 		if (peer) {
1271*5113495bSYour Name 			*ast_idx = peer->ast_idx;
1272*5113495bSYour Name 			*ast_hash = peer->ast_hash;
1273*5113495bSYour Name 			hal_tx_desc_set_index_lookup_override
1274*5113495bSYour Name 							(soc->hal_soc,
1275*5113495bSYour Name 							 hal_tx_desc_cached,
1276*5113495bSYour Name 							 0x1);
1277*5113495bSYour Name 			dp_peer_unref_delete(peer, DP_MOD_ID_TX);
1278*5113495bSYour Name 		}
1279*5113495bSYour Name 	} else {
1280*5113495bSYour Name 		return;
1281*5113495bSYour Name 	}
1282*5113495bSYour Name }
1283*5113495bSYour Name 
1284*5113495bSYour Name #else
1285*5113495bSYour Name static inline void
dp_get_peer_from_tx_exc_meta(struct dp_soc * soc,uint32_t * hal_tx_desc_cached,struct cdp_tx_exception_metadata * tx_exc_metadata,uint16_t * ast_idx,uint16_t * ast_hash)1286*5113495bSYour Name dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
1287*5113495bSYour Name 			     struct cdp_tx_exception_metadata *tx_exc_metadata,
1288*5113495bSYour Name 			     uint16_t *ast_idx, uint16_t *ast_hash)
1289*5113495bSYour Name {
1290*5113495bSYour Name }
1291*5113495bSYour Name #endif
1292*5113495bSYour Name 
/**
 * dp_tx_hw_enqueue_be() - Enqueue a TX frame descriptor to the HW TCL ring
 * @soc: DP soc handle
 * @vdev: DP vdev the frame is transmitted on
 * @tx_desc: SW TX descriptor describing the frame
 * @fw_metadata: metadata to program into the HAL descriptor for FW
 * @tx_exc_metadata: exception-path metadata (NULL on the regular TX path)
 * @msdu_info: per-MSDU info (TX queue/ring id, TID, xmit type)
 *
 * Builds the HAL TX descriptor in a zeroed stack buffer, claims a slot on
 * the TCL ring selected by @msdu_info->tx_queue.ring_id, syncs the cached
 * descriptor into it and updates enqueue statistics.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue; QDF_STATUS_E_RESOURCES when the
 * descriptor id is invalid, ring access fails or the ring is full.
 */
QDF_STATUS
dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id;
	uint8_t tid;
	struct dp_vdev_be *be_vdev;
	/* Descriptor is staged on the stack and synced to the ring slot */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
	hal_ring_handle_t hal_ring_hdl = NULL;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
	/* Default to the vdev's BSS AST entry; may be overridden below for
	 * WDS-extended exception frames.
	 */
	uint16_t ast_idx = vdev->bss_ast_idx;
	uint16_t ast_hash = vdev->bss_ast_hash;

	/* NOTE(review): be_vdev is fetched but not referenced below in this
	 * function — presumably retained for feature-conditional builds;
	 * confirm before removing.
	 */
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	if (qdf_unlikely(tx_exc_metadata)) {
		/* Exception path: encap/sec type must either be unset or
		 * match the vdev's configuration.
		 */
		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
				   CDP_INVALID_TX_ENCAP_TYPE) ||
				   (tx_exc_metadata->tx_encap_type ==
				    vdev->tx_encap_type));

		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
			qdf_assert_always((tx_exc_metadata->sec_type ==
					   CDP_INVALID_SEC_TYPE) ||
					   tx_exc_metadata->sec_type ==
					   vdev->sec_type);
		/* May override ast_idx/ast_hash for WDS-extended peers */
		dp_get_peer_from_tx_exc_meta(soc, (void *)cached_desc,
					     tx_exc_metadata,
					     &ast_idx, &ast_hash);
	}

	hal_tx_desc_cached = (void *)cached_desc;

	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
		/* SAWF-tagged frame: may rewrite fw_metadata */
		dp_sawf_config_be(soc, hal_tx_desc_cached,
				  &fw_metadata, tx_desc->nbuf, msdu_info);
		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
	}

	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
				    tx_desc->dma_addr, bm_id, tx_desc->id,
				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);

	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
					ast_idx);
	/*
	 * Bank_ID is used as DSCP_TABLE number in beryllium
	 * So there is no explicit field used for DSCP_TID_TABLE_NUM.
	 */

	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	      qdf_nbuf_is_tso(tx_desc->nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);

	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);

	tid = msdu_info->tid;
	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
						tx_desc->nbuf);
	dp_tx_set_particular_tx_queue(soc, hal_tx_desc_cached,
				      tx_desc->nbuf);
	dp_tx_desc_set_ktimestamp(vdev, tx_desc);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev,
			     tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
			     1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		return status;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev,
			     tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
			     1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		/* Ring was started successfully; must end access */
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);

	/* Sync cached descriptor with HW */
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);

	/* Coalescing may defer the HW head pointer update */
	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, ring_id);

	DP_STATS_INC_PKT(vdev, tx_i[msdu_info->xmit_type].processed, 1,
			 dp_tx_get_pkt_len(tx_desc));
	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
	dp_tx_update_stats(soc, tx_desc, ring_id);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc, ring_id);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);
	return status;
}
1437*5113495bSYour Name 
1438*5113495bSYour Name #ifdef IPA_OFFLOAD
1439*5113495bSYour Name static void
dp_tx_get_ipa_bank_config(struct dp_soc_be * be_soc,union hal_tx_bank_config * bank_config)1440*5113495bSYour Name dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
1441*5113495bSYour Name 			  union hal_tx_bank_config *bank_config)
1442*5113495bSYour Name {
1443*5113495bSYour Name 	bank_config->epd = 0;
1444*5113495bSYour Name 	bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
1445*5113495bSYour Name 	bank_config->encrypt_type = 0;
1446*5113495bSYour Name 
1447*5113495bSYour Name 	bank_config->src_buffer_swap = 0;
1448*5113495bSYour Name 	bank_config->link_meta_swap = 0;
1449*5113495bSYour Name 
1450*5113495bSYour Name 	bank_config->index_lookup_enable = 0;
1451*5113495bSYour Name 	bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1452*5113495bSYour Name 	bank_config->addrx_en = 1;
1453*5113495bSYour Name 	bank_config->addry_en = 1;
1454*5113495bSYour Name 
1455*5113495bSYour Name 	bank_config->mesh_enable = 0;
1456*5113495bSYour Name 	bank_config->dscp_tid_map_id = 0;
1457*5113495bSYour Name 	bank_config->vdev_id_check_en = 0;
1458*5113495bSYour Name 	bank_config->pmac_id = 0;
1459*5113495bSYour Name }
1460*5113495bSYour Name 
dp_tx_init_ipa_bank_profile(struct dp_soc_be * be_soc)1461*5113495bSYour Name static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
1462*5113495bSYour Name {
1463*5113495bSYour Name 	union hal_tx_bank_config ipa_config = {0};
1464*5113495bSYour Name 	int bid;
1465*5113495bSYour Name 
1466*5113495bSYour Name 	if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
1467*5113495bSYour Name 		be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
1468*5113495bSYour Name 		return;
1469*5113495bSYour Name 	}
1470*5113495bSYour Name 
1471*5113495bSYour Name 	dp_tx_get_ipa_bank_config(be_soc, &ipa_config);
1472*5113495bSYour Name 
1473*5113495bSYour Name 	/* Let IPA use last HOST owned bank */
1474*5113495bSYour Name 	bid = be_soc->num_bank_profiles - 1;
1475*5113495bSYour Name 
1476*5113495bSYour Name 	be_soc->bank_profiles[bid].is_configured = true;
1477*5113495bSYour Name 	be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
1478*5113495bSYour Name 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1479*5113495bSYour Name 				      &be_soc->bank_profiles[bid].bank_config,
1480*5113495bSYour Name 				      bid);
1481*5113495bSYour Name 	qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);
1482*5113495bSYour Name 
1483*5113495bSYour Name 	dp_info("IPA bank at slot %d config:0x%x", bid,
1484*5113495bSYour Name 		be_soc->bank_profiles[bid].bank_config.val);
1485*5113495bSYour Name 
1486*5113495bSYour Name 	be_soc->ipa_bank_id = bid;
1487*5113495bSYour Name }
1488*5113495bSYour Name #else /* !IPA_OFFLOAD */
/* IPA offload not compiled in: no IPA-dedicated TX bank to set up */
static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
{
}
1492*5113495bSYour Name #endif /* IPA_OFFLOAD */
1493*5113495bSYour Name 
dp_tx_init_bank_profiles(struct dp_soc_be * be_soc)1494*5113495bSYour Name QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
1495*5113495bSYour Name {
1496*5113495bSYour Name 	int i, num_tcl_banks;
1497*5113495bSYour Name 
1498*5113495bSYour Name 	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);
1499*5113495bSYour Name 
1500*5113495bSYour Name 	dp_assert_always_internal(num_tcl_banks);
1501*5113495bSYour Name 	be_soc->num_bank_profiles = num_tcl_banks;
1502*5113495bSYour Name 
1503*5113495bSYour Name 	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
1504*5113495bSYour Name 					       sizeof(*be_soc->bank_profiles));
1505*5113495bSYour Name 	if (!be_soc->bank_profiles) {
1506*5113495bSYour Name 		dp_err("unable to allocate memory for DP TX Profiles!");
1507*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
1508*5113495bSYour Name 	}
1509*5113495bSYour Name 
1510*5113495bSYour Name 	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);
1511*5113495bSYour Name 
1512*5113495bSYour Name 	for (i = 0; i < num_tcl_banks; i++) {
1513*5113495bSYour Name 		be_soc->bank_profiles[i].is_configured = false;
1514*5113495bSYour Name 		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
1515*5113495bSYour Name 	}
1516*5113495bSYour Name 	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);
1517*5113495bSYour Name 
1518*5113495bSYour Name 	dp_tx_init_ipa_bank_profile(be_soc);
1519*5113495bSYour Name 
1520*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1521*5113495bSYour Name }
1522*5113495bSYour Name 
/**
 * dp_tx_deinit_bank_profiles() - free the TX bank profile array and destroy
 *	the bank lock created by dp_tx_init_bank_profiles()
 * @be_soc: beryllium soc handle
 */
void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
{
	qdf_mem_free(be_soc->bank_profiles);
	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
}
1528*5113495bSYour Name 
1529*5113495bSYour Name static
dp_tx_get_vdev_bank_config(struct dp_vdev_be * be_vdev,union hal_tx_bank_config * bank_config)1530*5113495bSYour Name void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
1531*5113495bSYour Name 				union hal_tx_bank_config *bank_config)
1532*5113495bSYour Name {
1533*5113495bSYour Name 	struct dp_vdev *vdev = &be_vdev->vdev;
1534*5113495bSYour Name 
1535*5113495bSYour Name 	bank_config->epd = 0;
1536*5113495bSYour Name 
1537*5113495bSYour Name 	bank_config->encap_type = vdev->tx_encap_type;
1538*5113495bSYour Name 
1539*5113495bSYour Name 	/* Only valid for raw frames. Needs work for RAW mode */
1540*5113495bSYour Name 	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
1541*5113495bSYour Name 		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
1542*5113495bSYour Name 	} else {
1543*5113495bSYour Name 		bank_config->encrypt_type = 0;
1544*5113495bSYour Name 	}
1545*5113495bSYour Name 
1546*5113495bSYour Name 	bank_config->src_buffer_swap = 0;
1547*5113495bSYour Name 	bank_config->link_meta_swap = 0;
1548*5113495bSYour Name 
1549*5113495bSYour Name 	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
1550*5113495bSYour Name 	    vdev->opmode == wlan_op_mode_sta) {
1551*5113495bSYour Name 		bank_config->index_lookup_enable = 1;
1552*5113495bSYour Name 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
1553*5113495bSYour Name 		bank_config->addrx_en = 0;
1554*5113495bSYour Name 		bank_config->addry_en = 0;
1555*5113495bSYour Name 	} else {
1556*5113495bSYour Name 		bank_config->index_lookup_enable = 0;
1557*5113495bSYour Name 		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
1558*5113495bSYour Name 		bank_config->addrx_en =
1559*5113495bSYour Name 			(vdev->hal_desc_addr_search_flags &
1560*5113495bSYour Name 			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
1561*5113495bSYour Name 		bank_config->addry_en =
1562*5113495bSYour Name 			(vdev->hal_desc_addr_search_flags &
1563*5113495bSYour Name 			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
1564*5113495bSYour Name 	}
1565*5113495bSYour Name 
1566*5113495bSYour Name 	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;
1567*5113495bSYour Name 
1568*5113495bSYour Name 	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;
1569*5113495bSYour Name 
1570*5113495bSYour Name 	/* Disabling vdev id check for now. Needs revist. */
1571*5113495bSYour Name 	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;
1572*5113495bSYour Name 
1573*5113495bSYour Name 	bank_config->pmac_id = vdev->lmac_id;
1574*5113495bSYour Name }
1575*5113495bSYour Name 
dp_tx_get_bank_profile(struct dp_soc_be * be_soc,struct dp_vdev_be * be_vdev)1576*5113495bSYour Name int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
1577*5113495bSYour Name 			   struct dp_vdev_be *be_vdev)
1578*5113495bSYour Name {
1579*5113495bSYour Name 	char *temp_str = "";
1580*5113495bSYour Name 	bool found_match = false;
1581*5113495bSYour Name 	int bank_id = DP_BE_INVALID_BANK_ID;
1582*5113495bSYour Name 	int i;
1583*5113495bSYour Name 	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
1584*5113495bSYour Name 	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
1585*5113495bSYour Name 	union hal_tx_bank_config vdev_config = {0};
1586*5113495bSYour Name 
1587*5113495bSYour Name 	/* convert vdev params into hal_tx_bank_config */
1588*5113495bSYour Name 	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);
1589*5113495bSYour Name 
1590*5113495bSYour Name 	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
1591*5113495bSYour Name 	/* go over all banks and find a matching/unconfigured/unused bank */
1592*5113495bSYour Name 	for (i = 0; i < be_soc->num_bank_profiles; i++) {
1593*5113495bSYour Name 		if (be_soc->bank_profiles[i].is_configured &&
1594*5113495bSYour Name 		    (be_soc->bank_profiles[i].bank_config.val ^
1595*5113495bSYour Name 						vdev_config.val) == 0) {
1596*5113495bSYour Name 			found_match = true;
1597*5113495bSYour Name 			break;
1598*5113495bSYour Name 		}
1599*5113495bSYour Name 
1600*5113495bSYour Name 		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
1601*5113495bSYour Name 		    !be_soc->bank_profiles[i].is_configured)
1602*5113495bSYour Name 			unconfigured_slot = i;
1603*5113495bSYour Name 		else if (zero_ref_count_slot  == DP_BE_INVALID_BANK_ID &&
1604*5113495bSYour Name 		    !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
1605*5113495bSYour Name 			zero_ref_count_slot = i;
1606*5113495bSYour Name 	}
1607*5113495bSYour Name 
1608*5113495bSYour Name 	if (found_match) {
1609*5113495bSYour Name 		temp_str = "matching";
1610*5113495bSYour Name 		bank_id = i;
1611*5113495bSYour Name 		goto inc_ref_and_return;
1612*5113495bSYour Name 	}
1613*5113495bSYour Name 	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
1614*5113495bSYour Name 		temp_str = "unconfigured";
1615*5113495bSYour Name 		bank_id = unconfigured_slot;
1616*5113495bSYour Name 		goto configure_and_return;
1617*5113495bSYour Name 	}
1618*5113495bSYour Name 	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
1619*5113495bSYour Name 		temp_str = "zero_ref_count";
1620*5113495bSYour Name 		bank_id = zero_ref_count_slot;
1621*5113495bSYour Name 	}
1622*5113495bSYour Name 	if (bank_id == DP_BE_INVALID_BANK_ID) {
1623*5113495bSYour Name 		dp_alert("unable to find TX bank!");
1624*5113495bSYour Name 		QDF_BUG(0);
1625*5113495bSYour Name 		return bank_id;
1626*5113495bSYour Name 	}
1627*5113495bSYour Name 
1628*5113495bSYour Name configure_and_return:
1629*5113495bSYour Name 	be_soc->bank_profiles[bank_id].is_configured = true;
1630*5113495bSYour Name 	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
1631*5113495bSYour Name 	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
1632*5113495bSYour Name 				      &be_soc->bank_profiles[bank_id].bank_config,
1633*5113495bSYour Name 				      bank_id);
1634*5113495bSYour Name inc_ref_and_return:
1635*5113495bSYour Name 	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
1636*5113495bSYour Name 	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
1637*5113495bSYour Name 
1638*5113495bSYour Name 	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
1639*5113495bSYour Name 		temp_str, bank_id, vdev_config.val,
1640*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.val,
1641*5113495bSYour Name 		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
1642*5113495bSYour Name 
1643*5113495bSYour Name 	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
1644*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.epd,
1645*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.encap_type,
1646*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
1647*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
1648*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
1649*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
1650*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.addry_en,
1651*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
1652*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
1653*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
1654*5113495bSYour Name 		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);
1655*5113495bSYour Name 
1656*5113495bSYour Name 	return bank_id;
1657*5113495bSYour Name }
1658*5113495bSYour Name 
/**
 * dp_tx_put_bank_profile() - drop a vdev's reference on its TX bank profile
 * @be_soc: beryllium soc handle
 * @be_vdev: beryllium vdev whose bank_id reference is released
 *
 * The slot is not cleared; a zero ref_count only makes it eligible for
 * reuse by dp_tx_get_bank_profile().
 */
void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
			    struct dp_vdev_be *be_vdev)
{
	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
}
1666*5113495bSYour Name 
/**
 * dp_tx_update_bank_profile() - re-evaluate a vdev's TX bank after its
 *	bank-relevant parameters changed
 * @be_soc: beryllium soc handle
 * @be_vdev: beryllium vdev handle
 *
 * Releases the current bank reference, acquires a (possibly different)
 * bank matching the vdev's current config and mirrors the new id into
 * the generic vdev struct.
 */
void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
			       struct dp_vdev_be *be_vdev)
{
	dp_tx_put_bank_profile(be_soc, be_vdev);
	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
	be_vdev->vdev.bank_id = be_vdev->bank_id;
}
1674*5113495bSYour Name 
dp_tx_desc_pool_init_be(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id,bool spcl_tx_desc)1675*5113495bSYour Name QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
1676*5113495bSYour Name 				   uint32_t num_elem,
1677*5113495bSYour Name 				   uint8_t pool_id,
1678*5113495bSYour Name 				   bool spcl_tx_desc)
1679*5113495bSYour Name {
1680*5113495bSYour Name 	struct dp_tx_desc_pool_s *tx_desc_pool;
1681*5113495bSYour Name 	struct dp_hw_cookie_conversion_t *cc_ctx;
1682*5113495bSYour Name 	struct dp_spt_page_desc *page_desc;
1683*5113495bSYour Name 	struct dp_tx_desc_s *tx_desc;
1684*5113495bSYour Name 	uint32_t ppt_idx = 0;
1685*5113495bSYour Name 	uint32_t avail_entry_index = 0;
1686*5113495bSYour Name 
1687*5113495bSYour Name 	if (!num_elem) {
1688*5113495bSYour Name 		dp_err("desc_num 0 !!");
1689*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1690*5113495bSYour Name 	}
1691*5113495bSYour Name 
1692*5113495bSYour Name 	if (spcl_tx_desc) {
1693*5113495bSYour Name 		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
1694*5113495bSYour Name 		cc_ctx  = dp_get_spcl_tx_cookie_t(soc, pool_id);
1695*5113495bSYour Name 	} else {
1696*5113495bSYour Name 		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);;
1697*5113495bSYour Name 		cc_ctx  = dp_get_tx_cookie_t(soc, pool_id);
1698*5113495bSYour Name 	}
1699*5113495bSYour Name 	tx_desc = tx_desc_pool->freelist;
1700*5113495bSYour Name 	page_desc = &cc_ctx->page_desc_base[0];
1701*5113495bSYour Name 	while (tx_desc) {
1702*5113495bSYour Name 		if (avail_entry_index == 0) {
1703*5113495bSYour Name 			if (ppt_idx >= cc_ctx->total_page_num) {
1704*5113495bSYour Name 				dp_alert("insufficient secondary page tables");
1705*5113495bSYour Name 				qdf_assert_always(0);
1706*5113495bSYour Name 			}
1707*5113495bSYour Name 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1708*5113495bSYour Name 		}
1709*5113495bSYour Name 
1710*5113495bSYour Name 		/* put each TX Desc VA to SPT pages and
1711*5113495bSYour Name 		 * get corresponding ID
1712*5113495bSYour Name 		 */
1713*5113495bSYour Name 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1714*5113495bSYour Name 					 avail_entry_index,
1715*5113495bSYour Name 					 tx_desc);
1716*5113495bSYour Name 		tx_desc->id =
1717*5113495bSYour Name 			dp_cc_desc_id_generate(page_desc->ppt_index,
1718*5113495bSYour Name 					       avail_entry_index);
1719*5113495bSYour Name 		tx_desc->pool_id = pool_id;
1720*5113495bSYour Name 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
1721*5113495bSYour Name 		tx_desc = tx_desc->next;
1722*5113495bSYour Name 		avail_entry_index = (avail_entry_index + 1) &
1723*5113495bSYour Name 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1724*5113495bSYour Name 	}
1725*5113495bSYour Name 
1726*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1727*5113495bSYour Name }
1728*5113495bSYour Name 
/**
 * dp_tx_desc_pool_deinit_be() - clear the cookie-conversion page tables
 *	backing a TX descriptor pool
 * @soc: DP soc handle
 * @tx_desc_pool: descriptor pool being deinitialized
 * @pool_id: descriptor pool id
 * @spcl_tx_desc: true to use the special TX cookie context
 */
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_spt_page_desc *page_desc;
	int page_idx;

	/* Pick the cookie-conversion context matching the pool class */
	cc_ctx = spcl_tx_desc ? dp_get_spcl_tx_cookie_t(soc, pool_id) :
				dp_get_tx_cookie_t(soc, pool_id);

	/* Wipe every secondary page table page used by this pool */
	for (page_idx = 0; page_idx < cc_ctx->total_page_num; page_idx++) {
		page_desc = &cc_ctx->page_desc_base[page_idx];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}
1747*5113495bSYour Name 
1748*5113495bSYour Name #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
dp_tx_comp_nf_handler(struct dp_intr * int_ctx,struct dp_soc * soc,hal_ring_handle_t hal_ring_hdl,uint8_t ring_id,uint32_t quota)1749*5113495bSYour Name uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
1750*5113495bSYour Name 			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
1751*5113495bSYour Name 			       uint32_t quota)
1752*5113495bSYour Name {
1753*5113495bSYour Name 	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
1754*5113495bSYour Name 	uint32_t work_done = 0;
1755*5113495bSYour Name 
1756*5113495bSYour Name 	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
1757*5113495bSYour Name 			DP_SRNG_THRESH_NEAR_FULL)
1758*5113495bSYour Name 		return 0;
1759*5113495bSYour Name 
1760*5113495bSYour Name 	qdf_atomic_set(&tx_comp_ring->near_full, 1);
1761*5113495bSYour Name 	work_done++;
1762*5113495bSYour Name 
1763*5113495bSYour Name 	return work_done;
1764*5113495bSYour Name }
1765*5113495bSYour Name #endif
1766*5113495bSYour Name 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_CONFIG_TX_DELAY)
/* Extract the HW link id field embedded in a PPDU id */
#define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))

/* Upper bound accepted for a computed HW tx delay (us) */
#define HW_TX_DELAY_MAX                       0x1000000
/* Left-shift (x1024) scaling the raw buffer timestamp to microseconds,
 * per the macro name below
 */
#define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
/* 29-bit wrap mask applied to all delay/timestamp arithmetic */
#define HW_TX_DELAY_MASK                      0x1FFFFFFF
#define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
	 HW_TX_DELAY_MASK)
1778*5113495bSYour Name 
1779*5113495bSYour Name static inline
dp_mlo_compute_hw_delay_us(struct dp_soc * soc,struct dp_vdev * vdev,struct hal_tx_completion_status * ts,uint32_t * delay_us)1780*5113495bSYour Name QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
1781*5113495bSYour Name 				      struct dp_vdev *vdev,
1782*5113495bSYour Name 				      struct hal_tx_completion_status *ts,
1783*5113495bSYour Name 				      uint32_t *delay_us)
1784*5113495bSYour Name {
1785*5113495bSYour Name 	uint32_t ppdu_id;
1786*5113495bSYour Name 	uint8_t link_id_offset, link_id_bits;
1787*5113495bSYour Name 	uint8_t hw_link_id;
1788*5113495bSYour Name 	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
1789*5113495bSYour Name 	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
1790*5113495bSYour Name 	uint32_t delay;
1791*5113495bSYour Name 	int32_t delta_tsf2, delta_tqm;
1792*5113495bSYour Name 
1793*5113495bSYour Name 	if (!ts->valid)
1794*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
1795*5113495bSYour Name 
1796*5113495bSYour Name 	link_id_offset = soc->link_id_offset;
1797*5113495bSYour Name 	link_id_bits = soc->link_id_bits;
1798*5113495bSYour Name 	ppdu_id = ts->ppdu_id;
1799*5113495bSYour Name 	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
1800*5113495bSYour Name 					   link_id_bits);
1801*5113495bSYour Name 
1802*5113495bSYour Name 	msdu_tqm_enqueue_tstamp_us =
1803*5113495bSYour Name 		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
1804*5113495bSYour Name 	msdu_compl_tsf_tstamp_us = ts->tsf;
1805*5113495bSYour Name 
1806*5113495bSYour Name 	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
1807*5113495bSYour Name 	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);
1808*5113495bSYour Name 
1809*5113495bSYour Name 	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
1810*5113495bSYour Name 			delta_tqm) & HW_TX_DELAY_MASK;
1811*5113495bSYour Name 
1812*5113495bSYour Name 	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
1813*5113495bSYour Name 			delta_tsf2) & HW_TX_DELAY_MASK;
1814*5113495bSYour Name 
1815*5113495bSYour Name 	delay = (final_msdu_compl_tsf_tstamp_us -
1816*5113495bSYour Name 		final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;
1817*5113495bSYour Name 
1818*5113495bSYour Name 	if (delay > HW_TX_DELAY_MAX)
1819*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1820*5113495bSYour Name 
1821*5113495bSYour Name 	if (delay_us)
1822*5113495bSYour Name 		*delay_us = delay;
1823*5113495bSYour Name 
1824*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1825*5113495bSYour Name }
1826*5113495bSYour Name #else
static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	/* MLO tx-delay computation is compiled out; succeed without
	 * touching *delay_us.
	 */
	return QDF_STATUS_SUCCESS;
}
1835*5113495bSYour Name #endif
1836*5113495bSYour Name 
/**
 * dp_tx_compute_tx_delay_be() - compute the HW tx delay for a completed frame
 * @soc: dp soc handle
 * @vdev: dp vdev the frame was sent on
 * @ts: HAL tx completion status of the frame
 * @delay_us: out-param for the computed delay in microseconds
 *
 * Thin BE wrapper over dp_mlo_compute_hw_delay_us(); a no-op stub is
 * substituted when the MLO tx-delay feature is compiled out.
 *
 * Return: QDF_STATUS reported by the underlying computation
 */
QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
}
1844*5113495bSYour Name 
1845*5113495bSYour Name static inline
dp_tx_nbuf_map_be(struct dp_vdev * vdev,struct dp_tx_desc_s * tx_desc,qdf_nbuf_t nbuf)1846*5113495bSYour Name qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
1847*5113495bSYour Name 				 struct dp_tx_desc_s *tx_desc,
1848*5113495bSYour Name 				 qdf_nbuf_t nbuf)
1849*5113495bSYour Name {
1850*5113495bSYour Name 	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
1851*5113495bSYour Name 					(void *)(nbuf->data + 256));
1852*5113495bSYour Name 
1853*5113495bSYour Name 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
1854*5113495bSYour Name }
1855*5113495bSYour Name 
static inline
void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
			 struct dp_tx_desc_s *desc)
{
	/* Intentional no-op: dp_tx_nbuf_map_be() resolves the address via
	 * qdf_mem_virt_to_phys() without creating a DMA mapping, so there
	 * is nothing to undo here.
	 */
}
1861*5113495bSYour Name 
1862*5113495bSYour Name #ifdef QCA_DP_TX_NBUF_LIST_FREE
/**
 * dp_tx_fast_send_be() - BE-specific fast-path transmit entry point
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev sending this frame
 * @nbuf: network buffer to transmit
 *
 * Builds the TCL data command words directly in a stack-cached descriptor
 * and enqueues it to the TCL ring, bypassing the regular tx path.
 *
 * Return: NULL on success (nbuf consumed by HW), or @nbuf on failure so
 *         the caller can free it.
 */
qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct dp_tx_desc_s *tx_desc;
	uint16_t desc_pool_id;
	uint16_t pkt_len;
	qdf_dma_addr_t paddr;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	/* Descriptor image built on the stack, then copied to the ring */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	hal_ring_handle_t hal_ring_hdl = NULL;
	uint32_t *hal_tx_desc_cached;
	void *hal_tx_desc;
	uint8_t tid = HTT_TX_EXT_TID_INVALID;
	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
	uint8_t sawf_tid = HTT_TX_EXT_TID_INVALID;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	/* Queue mapping selects both the descriptor pool and the TCL ring */
	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;

	pkt_len = qdf_nbuf_headlen(nbuf);
	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, pkt_len);
	DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_in_fast_xmit_flow, 1);
	DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_per_core[desc_pool_id], 1);

	pdev = vdev->pdev;
	if (dp_tx_limit_check(vdev, nbuf))
		return nbuf;

	/* Honor the HLOS-assigned nbuf priority when TID override is on;
	 * out-of-range values fall back to the invalid TID sentinel.
	 */
	if (qdf_unlikely(vdev->skip_sw_tid_classification
				& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
		tid = qdf_nbuf_get_priority(nbuf);

		if (tid >= DP_TX_INVALID_QOS_TAG)
			tid = HTT_TX_EXT_TID_INVALID;
	}

	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.desc_na.num, 1);
		DP_STATS_INC(vdev,
			     tx_i[xmit_type].dropped.desc_na_exc_alloc_fail.num,
			     1);
		return nbuf;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->length = pkt_len;
	tx_desc->flags |= pdev->tx_fast_flag;

	/* NOTE(review): set unconditionally; presumably marks the skb for
	 * the recycler on completion — confirm against recycler code.
	 */
	tx_desc->nbuf->fast_recycled = 1;

	if (nbuf->is_from_recycler && nbuf->fast_xmit)
		tx_desc->flags |= DP_TX_DESC_FLAG_FAST;

	paddr =  dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
	if (!paddr) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.dma_error, 1);
		goto release_desc;
	}

	tx_desc->dma_addr = paddr;

	/* Populate the TCL data command words in the cached image */
	hal_tx_desc_cached = (void *)cached_desc;
	/* word 0: lower 32 bits of the buffer physical address */
	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
	/* word 1: SW buffer cookie = tx descriptor id */
	hal_tx_desc_cached[1] = tx_desc->id <<
		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;

	/* bank_id */
	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;

	hal_tx_desc_cached[4] = tx_desc->length;
	/* l3 and l4 checksum enable */
	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;

	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;

	/* SAWF tagging may override the TID chosen above */
	if (qdf_unlikely(dp_sawf_tag_valid_get(nbuf))) {
		sawf_tid = dp_sawf_config_be(soc, hal_tx_desc_cached,
					     NULL, nbuf, NULL);
		if (sawf_tid != HTT_TX_EXT_TID_INVALID)
			tid = sawf_tid;
	}

	if (tid != HTT_TX_EXT_TID_INVALID) {
		hal_tx_desc_cached[5] |= tid << TCL_DATA_CMD_HLOS_TID_LSB;
		hal_tx_desc_cached[5] |= 1 << TCL_DATA_CMD_HLOS_TID_OVERWRITE_LSB;
	}

	/* STA mode: point HW at the BSS peer AST entry */
	if (vdev->opmode == wlan_op_mode_sta)
		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
			((vdev->bss_ast_hash & 0xF) <<
			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
		goto ring_access_fail2;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	/* Sync cached descriptor with HW */
	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
	/* barrier: make sure the descriptor is visible before ring end */
	qdf_dsb();

	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
	status = QDF_STATUS_SUCCESS;

	/* Fallthrough unwinding: ring access is ended whenever it was
	 * started; on any non-success status the nbuf is unmapped and the
	 * descriptor released.
	 */
ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);

ring_access_fail2:
	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_nbuf_unmap_be(soc, tx_desc);
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(soc, tx_desc, desc_pool_id);

	return nbuf;
}
2023*5113495bSYour Name #endif
2024*5113495bSYour Name 
/**
 * dp_tx_desc_pool_alloc_be() - arch-specific tx descriptor pool allocation
 * @soc: dp soc handle
 * @num_elem: number of descriptors in the pool
 * @pool_id: id of the pool being allocated
 *
 * No BE-specific resources need to be allocated for a tx descriptor pool;
 * this hook exists to satisfy the common datapath interface.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_tx_desc_pool_alloc_be(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}
2030*5113495bSYour Name 
/**
 * dp_tx_desc_pool_free_be() - arch-specific tx descriptor pool free
 * @soc: dp soc handle
 * @pool_id: id of the pool being freed
 *
 * Intentional no-op: the matching alloc hook allocates nothing, so there
 * is nothing to release here.
 */
void dp_tx_desc_pool_free_be(struct dp_soc *soc, uint8_t pool_id)
{
}
2034