/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_rh_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_rh_api.h>
#include <hal_rh_tx.h>
#include "dp_peer.h"
#include "dp_rh.h"
#include <ce_api.h>
#include <ce_internal.h>
#include "dp_rh_htt.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

#if defined(FEATURE_TSO)
/**
 * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
 * @nbuf: socket buffer
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 * @download_len: Packet download length that needs adjustment
 *
 * Return: uint32_t (Adjusted packet download length)
 */
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	uint32_t frag0_len;
	uint32_t delta;
	uint32_t eit_hdr_len;

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	download_len -= frag0_len;

	eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;

	/* If the EIT header length is less than the MSDU download length,
	 * adjust the download length to hold just the EIT header.
	 */
	if (eit_hdr_len < download_len) {
		delta = download_len - eit_hdr_len;
		download_len -= delta;
	}

	return download_len;
}
#else
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	return download_len;
}
#endif /* FEATURE_TSO */
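
/*
 * Worked example for dp_tx_adjust_tso_download_len_rh() (illustrative
 * numbers only): if download_len is 256 and frag 0 (the TCL command) is
 * 32 bytes, 224 bytes remain for the payload; with a 64 byte EIT header
 * in the first TSO fragment, 64 < 224 so the length is clamped to 64 and
 * only the EIT header is downloaded along with the command.
 */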

QDF_STATUS
dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
				       void *tx_comp_hal_desc,
				       struct dp_tx_desc_s **r_tx_desc)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
	uint8_t pool_id;
	struct dp_tx_desc_s *tx_desc;

	pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	tx_desc = dp_tx_desc_find(soc, pool_id,
				  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
						DP_TX_DESC_ID_PAGE_OS,
				  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
						DP_TX_DESC_ID_OFFSET_OS, false);
	/* Error out if the pool id derived from the cookie does not match
	 * the pool id recorded in the descriptor.
	 */
	if (tx_desc && tx_desc->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, tx_desc->pool_id);

		qdf_assert_always(0);
	}

	return tx_desc;
}
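
/*
 * sw_cookie layout note: the decode above assumes the descriptor id
 * layout from dp_tx_desc.h, i.e. a pool id field in the top bits, then a
 * page id field, then the offset within the page (the same fields
 * composed in dp_tx_desc_pool_init_rh() below). For example, a cookie
 * whose pool field decodes to 1, page field to 3 and offset field to 7
 * resolves to the 8th descriptor on the 4th page of pool 1.
 */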

void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
}

static inline uint32_t
dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
{
	uint32_t frag0_len; /* TCL_DATA_CMD */
	uint32_t frag1_len; /* 64 byte payload */

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	frag1_len = download_len - frag0_len;

	if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
		frag1_len = qdf_nbuf_len(nbuf);

	return frag0_len + frag1_len;
}
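
/*
 * Worked example for dp_tx_adjust_download_len_rh() (hypothetical
 * sizes): with a 32 byte TCL command in frag 0 and a download_len of 96,
 * frag 1 would be 64 bytes; if the nbuf holds only 40 bytes, the frag 1
 * portion is clamped to 40 and the function returns 72.
 */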

static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
{
	uint32_t pkt_offset;
	uint32_t tx_classify;
	uint32_t data_attr;

	/* Enable tx_classify bit in CE SRC DESC for all data packets */
	tx_classify = 1;
	pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);

	data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
	data_attr |= pkt_offset  << CE_DESC_PKT_OFFSET_BIT_S;

	qdf_nbuf_data_attr_set(nbuf, data_attr);
}
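
/*
 * Note: data_attr is consumed by the CE layer when it builds the source
 * descriptor; pkt_offset equals the frag 0 length, i.e. the size of the
 * TCL command prepended ahead of the payload, and the bit positions come
 * from the CE_DESC_*_BIT_S definitions in the CE headers.
 */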

#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = 0;
}
#else
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
}
#endif

#if defined(FEATURE_RUNTIME_PM)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	int ret;

	/* Avoid runtime get and put APIs under high throughput scenarios */
	if (dp_get_rtpm_tput_policy_requirement(soc)) {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
		return;
	}

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (QDF_IS_STATUS_SUCCESS(ret)) {
		if (hif_system_pm_state_check(soc->hif_handle)) {
			ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
					  CE_RING_FLUSH_EVENT);
			ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		} else {
			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
							    coalesce);
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
	} else {
		dp_runtime_get(soc);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		qdf_atomic_inc(&soc->tx_pending_rtpm);
		dp_runtime_put(soc);
	}
}
#elif defined(DP_POWER_SAVE)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	if (hif_system_pm_state_check(soc->hif_handle)) {
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
	} else {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
	}
}
#else
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
					    coalesce);
}
#endif
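
/*
 * Summary of the dp_tx_update_write_index() variants above: with runtime
 * PM, the write index update runs under an async runtime-get and is
 * deferred via a CE ring flush event if the get fails or the system is
 * suspending; with only DP_POWER_SAVE, the system PM state alone decides
 * between updating and deferring; otherwise the index is always updated
 * immediately.
 */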

/**
 * dp_flush_tx_ring_rh() - flush tx ring write index
 * @pdev: dp pdev handle
 * @ring_id: Tx ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	int ret;

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		return ret;
	}

	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

	return ret;
}

QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	uint32_t download_len = tx_ep_info->download_len;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	uint8_t tid = msdu_info->tid;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	int ret;

	/*
	 * Initialize the descriptor statically here to avoid
	 * a function-call jump via qdf_mem_set()/memset()
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);

	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, 0, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);

	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d, type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);

	qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
				(char *)tx_desc->tcl_cmd_vaddr,
				tx_desc->tcl_cmd_paddr);

	download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);

	if (qdf_nbuf_is_tso(nbuf)) {
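		/*
		 * For TSO, point the nbuf physical address at the first
		 * TSO fragment (the EIT header) of the current segment so
		 * the CE downloads the header for this segment, then clamp
		 * the download length to that header.
		 */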
		QDF_NBUF_CB_PADDR(nbuf) =
			msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
		download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
								download_len);
	}

	dp_tx_fill_nbuf_data_attr_rh(nbuf);

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
			      tx_ep_info->tx_endpoint, download_len);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		dp_verbose_debug("CE tx ring full");
		/* TODO: Should this be a separate ce_ring_full stat? */
		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail, 1);
		goto enqueue_fail;
	}

	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, 0);

	dp_tx_update_write_index(soc, tx_ep_info, coalesce);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
	DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
			 tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[0], 1);

	dp_tx_update_stats(soc, tx_desc, 0);
	status = QDF_STATUS_SUCCESS;

	dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

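	/*
	 * Note: the success path intentionally falls through to this label,
	 * so the driver-exit timestamp is recorded for both outcomes;
	 * status distinguishes success from enqueue failure.
	 */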
enqueue_fail:
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

/**
 * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
 *				    based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
			     uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return QDF_STATUS_E_INVAL;

	/* Allocate tcl descriptors in coherent memory */
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TCL_DESC_TYPE,
				      &tcl_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!tcl_desc_pool->desc_pages.num_pages) {
		dp_err("failed to allocate tcl desc pages");
		status = QDF_STATUS_E_NOMEM;
		goto err_alloc_fail;
	}

	return status;

err_alloc_fail:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

/**
 * dp_tx_tcl_desc_pool_free_rh() - Free the tcl descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
}

/**
 * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
 *				   based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
			    uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct qdf_mem_dma_page_t *page_info;
	QDF_STATUS status;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
	tcl_desc_pool->elem_count = num_elem;

	/* Link tcl descriptors into a freelist */
	if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
				    tcl_desc_pool->elem_size,
				    tcl_desc_pool->elem_count,
				    false)) {
		dp_err("failed to link tcl desc pages");
		status = QDF_STATUS_E_FAULT;
		goto err_link_fail;
	}

	page_info = tcl_desc_pool->desc_pages.dma_pages;
	tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;

	return QDF_STATUS_SUCCESS;

err_link_fail:
	return status;
}

/**
 * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
 *				     based on pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
{
}

/**
 * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
 * @tcl_desc_pool: Tcl descriptor pool
 * @tx_desc: SW TX descriptor
 * @index: Index into the tcl descriptor pool
 */
static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
				    struct dp_tx_desc_s *tx_desc,
				    uint32_t index)
{
	struct qdf_mem_dma_page_t *dma_page;
	uint32_t page_id;
	uint32_t offset;

	tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;

	if (tcl_desc_pool->freelist)
		tcl_desc_pool->freelist =
			*((uint32_t **)tcl_desc_pool->freelist);

	page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
	offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
	dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];

	tx_desc->tcl_cmd_paddr =
		dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
}
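
/*
 * Note on the freelist pop above: qdf_mem_multi_page_link() stores the
 * pointer to the next free element in the first word of each element,
 * so dereferencing the head advances the freelist. The physical address
 * is recomputed from the element index because the DMA pages backing the
 * pool are not physically contiguous with one another.
 */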

QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tcl desc pool %d", pool_id);
		goto err_out;
	}

	status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tx ext desc pool %d", pool_id);
		goto err_deinit_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso desc pool %d", pool_id);
		goto err_deinit_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso num seg pool %d", pool_id);
		goto err_deinit_tso_pool;
	}

	tx_desc_pool = &soc->tx_desc[pool_id];
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);
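		/*
		 * The id composed here is the same sw_cookie later decoded
		 * by dp_tx_comp_find_tx_desc_rh(): pool id, page id and
		 * page offset packed using the DP_TX_DESC_ID_*_OS shifts.
		 */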

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;

err_deinit_tso_pool:
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tx_ext_pool:
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tcl_pool:
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
err_out:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
	dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
}

QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tcl desc pool %d", pool_id);
		goto err_tcl_desc_pool;
	}

	status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tx ext desc pool %d", pool_id);
		goto err_free_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso desc pool %d", pool_id);
		goto err_free_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso num seg pool %d", pool_id);
		goto err_free_tso_pool;
	}

	return status;

err_free_tso_pool:
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
err_free_tx_ext_pool:
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
err_free_tcl_pool:
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
err_tcl_desc_pool:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}

void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t sw_cookie;
	uint32_t num_msdus;
	uint32_t *msg_word;
	uint8_t ring_id;
	uint8_t tx_status;
	int i;

	DP_HIST_INIT();

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
	num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
	msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;
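
	/*
	 * Message layout as parsed here: a fixed header of
	 * HTT_SOFT_UMAC_TX_COMPL_IND_SIZE bytes carrying the MSDU count,
	 * followed by num_msdus records of HTT_TX_MSDU_INFO_SIZE bytes,
	 * each holding the buffer address info, sw cookie, peer id and
	 * release reason consumed below.
	 */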

	for (i = 0; i < num_msdus; i++) {
		sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));

		tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
		if (!tx_desc) {
			dp_err("failed to find tx desc");
			qdf_assert_always(0);
		}

		/*
		 * If the descriptor is already freed in vdev_detach,
		 * continue to next descriptor
		 */
		if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
			dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
					   tx_desc->id);
			DP_STATS_INC(soc, tx.tx_comp_exception, 1);
			dp_tx_desc_check_corruption(tx_desc);
			goto next_msdu;
		}

		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
			dp_tx_comp_info_rl("pdev in down state %d",
					   tx_desc->id);
			tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
			dp_tx_comp_free_buf(soc, tx_desc, false);
			dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
			goto next_msdu;
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
					 tx_desc->flags, tx_desc->id);
			qdf_assert_always(0);
		}

		if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
		    HTT_TX_MSDU_RELEASE_SOURCE_FW)
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		else
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

		tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));

		tx_desc->tx_status =
			(tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
			 HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);

		qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

		DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

		/* First descriptor of this batch: initialize both head
		 * and tail before appending
		 */
		if (!head_desc) {
			head_desc = tx_desc;
			tail_desc = tx_desc;
		}

		tail_desc->next = tx_desc;
		tx_desc->next = NULL;
		tail_desc = tx_desc;
next_msdu:
		msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
	}

	/* For now, pass ring_id as 0 (zero) as WCN6450 only
	 * supports one TX ring.
	 */
	ring_id = 0;

	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
	DP_TX_HIST_STATS_PER_PDEV();
}
794*5113495bSYour Name }
795