xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/rh/dp_rh_tx.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 #include "cdp_txrx_cmn_struct.h"
19 #include "dp_types.h"
20 #include "dp_tx.h"
21 #include "dp_rh_tx.h"
22 #include "dp_tx_desc.h"
23 #include <dp_internal.h>
24 #include <dp_htt.h>
25 #include <hal_rh_api.h>
26 #include <hal_rh_tx.h>
27 #include "dp_peer.h"
28 #include "dp_rh.h"
29 #include <ce_api.h>
30 #include <ce_internal.h>
31 #include "dp_rh_htt.h"
32 
33 extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
34 
35 #if defined(FEATURE_TSO)
36 /**
37  * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
38  * @nbuf: socket buffer
39  * @msdu_info: handle to struct dp_tx_msdu_info_s
40  * @download_len: Packet download length that needs adjustment
41  *
42  * Return: uint32_t (Adjusted packet download length)
43  */
44 static uint32_t
45 dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
46 				 struct dp_tx_msdu_info_s *msdu_info,
47 				 uint32_t download_len)
48 {
49 	uint32_t frag0_len;
50 	uint32_t delta;
51 	uint32_t eit_hdr_len;
52 
53 	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
54 	download_len -= frag0_len;
55 
56 	eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;
57 
58 	/* If EIT header length is less than the MSDU download length, then
59 	 * adjust the download length to just hold EIT header.
60 	 */
61 	if (eit_hdr_len < download_len) {
62 		delta = download_len - eit_hdr_len;
63 		download_len -= delta;
64 	}
65 
66 	return download_len;
67 }
68 #else
69 static uint32_t
70 dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
71 				 struct dp_tx_msdu_info_s *msdu_info,
72 				 uint32_t download_len)
73 {
74 	return download_len;
75 }
76 #endif /* FEATURE_TSO */
77 
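/*
 * Note: on Rhine the TX completion parameters are parsed from the HTT
 * soft-UMAC completion message in dp_tx_compl_handler_rh() below, so this
 * converged hook has nothing to extract from a HAL ring descriptor.
 */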
78 QDF_STATUS
79 dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
80 				       void *tx_comp_hal_desc,
81 				       struct dp_tx_desc_s **r_tx_desc)
82 {
83 	return QDF_STATUS_SUCCESS;
84 }
85 
86 /**
87  * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
88  *
89  * @soc: Handle to DP SoC structure
90  * @sw_cookie: Key to find the TX descriptor
91  *
92  * Return: TX descriptor handle or NULL (if not found)
93  */
94 static struct dp_tx_desc_s *
95 dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
96 {
97 	uint8_t pool_id;
98 	struct dp_tx_desc_s *tx_desc;
99 
100 	pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
101 			DP_TX_DESC_ID_POOL_OS;
102 
103 	/* Find Tx descriptor */
104 	tx_desc = dp_tx_desc_find(soc, pool_id,
105 				  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
106 						DP_TX_DESC_ID_PAGE_OS,
107 				  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
108 						DP_TX_DESC_ID_OFFSET_OS, false);
109 	/* Error if the pool id does not match */
110 	if (tx_desc && tx_desc->pool_id != pool_id) {
111 		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
112 				 pool_id, tx_desc->pool_id);
113 
114 		qdf_assert_always(0);
115 	}
116 
117 	return tx_desc;
118 }
119 
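/*
 * Note: no-op on this target; TX completions are processed in
 * dp_tx_compl_handler_rh() from the HTT completion message.
 */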
120 void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
121 				     struct dp_tx_desc_s *tx_desc,
122 				     uint8_t *status,
123 				     uint8_t ring_id)
124 {
125 }
126 
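/*
 * dp_tx_adjust_download_len_rh() - Clamp the CE download length so the
 * payload portion (frag 1) never exceeds the nbuf length; frag 0 is the
 * prepended TCL_DATA_CMD.
 */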
127 static inline uint32_t
128 dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
129 {
130 	uint32_t frag0_len; /* TCL_DATA_CMD */
131 	uint32_t frag1_len; /* 64 byte payload */
132 
133 	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
134 	frag1_len = download_len - frag0_len;
135 
136 	if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
137 		frag1_len = qdf_nbuf_len(nbuf);
138 
139 	return frag0_len + frag1_len;
140 }
141 
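/*
 * dp_tx_fill_nbuf_data_attr_rh() - Build the CE source descriptor data
 * attributes for the nbuf: set the tx_classify bit and encode the packet
 * offset (length of frag 0, the TCL command) for the CE enqueue path.
 */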
142 static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
143 {
144 	uint32_t pkt_offset;
145 	uint32_t tx_classify;
146 	uint32_t data_attr;
147 
148 	/* Enable tx_classify bit in CE SRC DESC for all data packets */
149 	tx_classify = 1;
150 	pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);
151 
152 	data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
153 	data_attr |= pkt_offset  << CE_DESC_PKT_OFFSET_BIT_S;
154 
155 	qdf_nbuf_data_attr_set(nbuf, data_attr);
156 }
157 
158 #ifdef DP_TX_HW_DESC_HISTORY
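/*
 * dp_tx_record_hw_desc_rh() - Copy the cached TCL descriptor into the
 * soc-level TX HW descriptor history for debugging.
 */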
159 static inline void
160 dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
161 {
162 	struct dp_tx_hw_desc_history *tx_hw_desc_history =
163 						&soc->tx_hw_desc_history;
164 	struct dp_tx_hw_desc_evt *evt;
165 	uint32_t idx = 0;
166 	uint16_t slot = 0;
167 
168 	if (!tx_hw_desc_history->allocated)
169 		return;
170 
171 	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
172 					 &slot,
173 					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
174 					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
175 					 DP_TX_HW_DESC_HIST_MAX);
176 
177 	evt = &tx_hw_desc_history->entry[slot][idx];
178 	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
179 	evt->posted = qdf_get_log_timestamp();
180 	evt->tcl_ring_id = 0;
181 }
182 #else
183 static inline void
184 dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
185 {
186 }
187 #endif
188 
189 #if defined(FEATURE_RUNTIME_PM)
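/*
 * dp_tx_update_write_index() - Update the CE source ring write index.
 * Under runtime PM an async RTPM reference is taken first; if the system
 * PM state disallows access or the reference cannot be taken, only a CE
 * flush event is recorded so the index can be synced later.
 */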
190 static void dp_tx_update_write_index(struct dp_soc *soc,
191 				     struct dp_tx_ep_info_rh *tx_ep_info,
192 				     int coalesce)
193 {
194 	int ret;
195 
196 	/* Avoid runtime get and put APIs under high throughput scenarios */
197 	if (dp_get_rtpm_tput_policy_requirement(soc)) {
198 		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
199 						    coalesce);
200 		return;
201 	}
202 
203 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
204 	if (QDF_IS_STATUS_SUCCESS(ret)) {
205 		if (hif_system_pm_state_check(soc->hif_handle)) {
206 			ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
207 					  CE_RING_FLUSH_EVENT);
208 			ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
209 		} else {
210 			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
211 							    coalesce);
212 		}
213 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
214 	} else {
215 		dp_runtime_get(soc);
216 		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
217 				  CE_RING_FLUSH_EVENT);
218 		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
219 		qdf_atomic_inc(&soc->tx_pending_rtpm);
220 		dp_runtime_put(soc);
221 	}
222 }
223 #elif defined(DP_POWER_SAVE)
224 static void dp_tx_update_write_index(struct dp_soc *soc,
225 				     struct dp_tx_ep_info_rh *tx_ep_info, int coalesce)
226 {
227 	if (hif_system_pm_state_check(soc->hif_handle)) {
228 		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
229 				  CE_RING_FLUSH_EVENT);
230 		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
231 	} else {
232 		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
233 						    coalesce);
234 	}
235 }
236 #else
237 static void dp_tx_update_write_index(struct dp_soc *soc,
238 				     struct dp_tx_ep_info_rh *tx_ep_info, int coalesce)
239 {
240 	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
241 					    coalesce);
242 }
243 #endif
244 
245 /*
246  * dp_flush_tx_ring_rh() - flush tx ring write index
247  * @pdev: dp pdev handle
248  * @ring_id: Tx ring id
249  *
250  * Return: 0 on success and error code on failure
251  */
252 int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id)
253 {
254 	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
255 	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
256 	int ret;
257 
258 	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
259 	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
260 	if (ret) {
261 		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
262 		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
263 				  CE_RING_FLUSH_EVENT);
264 		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
265 		return ret;
266 	}
267 
268 	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
269 	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
270 	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
271 
272 	return ret;
273 }
274 
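/*
 * dp_tx_hw_enqueue_rh() - Fill the TCL_DATA_CMD for the frame, prepend it
 * to the nbuf as frag 0 and enqueue the nbuf to the CE TX ring with an
 * adjusted download length, updating TX stats on success.
 */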
275 QDF_STATUS
276 dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
277 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
278 		    struct cdp_tx_exception_metadata *tx_exc_metadata,
279 		    struct dp_tx_msdu_info_s *msdu_info)
280 {
281 	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
282 	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
283 	uint32_t download_len = tx_ep_info->download_len;
284 	qdf_nbuf_t nbuf = tx_desc->nbuf;
285 	uint8_t tid = msdu_info->tid;
286 	uint32_t *hal_tx_desc_cached;
287 	int coalesce = 0;
288 	int ret;
289 
290 	/*
291 	 * Initialize the cached descriptor statically here to avoid
292 	 * the call overhead of a qdf_mem_set()/memset
293 	 */
294 	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
295 
296 	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
297 			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
298 			tx_exc_metadata->sec_type : vdev->sec_type);
299 
300 	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
301 
302 	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
303 		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
304 		return QDF_STATUS_E_RESOURCES;
305 	}
306 
307 	hal_tx_desc_cached = (void *)cached_desc;
308 
309 	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
310 				 tx_desc->dma_addr, 0, tx_desc->id,
311 				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
312 	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
313 				vdev->lmac_id);
314 	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
315 				    vdev->search_type);
316 	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
317 				     vdev->bss_ast_idx);
318 
319 	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
320 				     sec_type_map[sec_type]);
321 	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
322 				      (vdev->bss_ast_hash & 0xF));
323 
324 	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
325 	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
326 	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
327 	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
328 	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
329 					  vdev->hal_desc_addr_search_flags);
330 
331 	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
332 		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
333 
334 	/* verify checksum offload configuration */
335 	if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
336 	    qdf_nbuf_is_tso(nbuf))  {
337 		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
338 		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
339 	}
340 
341 	if (tid != HTT_TX_EXT_TID_INVALID)
342 		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
343 
344 	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
345 		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
346 
347 	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
348 		dp_tx_desc_set_timestamp(tx_desc);
349 
350 	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
351 			 tx_desc->length,
352 			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
353 			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
354 			 tx_desc->id);
355 
356 	hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);
357 
358 	qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
359 				(char *)tx_desc->tcl_cmd_vaddr,
360 				tx_desc->tcl_cmd_paddr);
361 
362 	download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);
363 
364 	if (qdf_nbuf_is_tso(nbuf)) {
365 		QDF_NBUF_CB_PADDR(nbuf) =
366 			msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
367 		download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
368 								download_len);
369 	}
370 
371 	dp_tx_fill_nbuf_data_attr_rh(nbuf);
372 
373 	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
374 	ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
375 			      tx_ep_info->tx_endpoint, download_len);
376 	if (ret) {
377 		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
378 		dp_verbose_debug("CE tx ring full");
379 		/* TODO: Should this be a separate ce_ring_full stat? */
380 		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
381 		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail, 1);
382 		goto enqueue_fail;
383 	}
384 
385 	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
386 					    msdu_info, 0);
387 
388 	dp_tx_update_write_index(soc, tx_ep_info, coalesce);
389 	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
390 
391 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
392 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
393 	DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
394 			 tx_desc->length);
395 	DP_STATS_INC(soc, tx.tcl_enq[0], 1);
396 
397 	dp_tx_update_stats(soc, tx_desc, 0);
398 	status = QDF_STATUS_SUCCESS;
399 
400 	dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);
401 
402 enqueue_fail:
403 	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
404 			     qdf_get_log_timestamp(), tx_desc->nbuf);
405 
406 	return status;
407 }
408 
409 /**
410  * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
411  *				    based on pool_id
412  * @soc: Handle to DP SoC structure
413  * @num_elem: Number of descriptor elements per pool
414  * @pool_id: Pool to allocate
415  *
416  * Return: QDF_STATUS_SUCCESS
417  *	   QDF_STATUS_E_NOMEM
418  */
419 static QDF_STATUS
420 dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
421 			     uint8_t pool_id)
422 {
423 	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
424 	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
425 	uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
426 	QDF_STATUS status = QDF_STATUS_SUCCESS;
427 	qdf_dma_context_t memctx = 0;
428 
429 	if (pool_id > MAX_TXDESC_POOLS - 1)
430 		return QDF_STATUS_E_INVAL;
431 
432 	/* Allocate tcl descriptors in coherent memory */
433 	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
434 	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
435 	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TCL_DESC_TYPE,
436 				      &tcl_desc_pool->desc_pages,
437 				      elem_size, num_elem, memctx, false);
438 
439 	if (!tcl_desc_pool->desc_pages.num_pages) {
440 		dp_err("failed to allocate tcl desc Pages");
441 		status = QDF_STATUS_E_NOMEM;
442 		goto err_alloc_fail;
443 	}
444 
445 	return status;
446 
447 err_alloc_fail:
448 	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
449 				     &tcl_desc_pool->desc_pages,
450 				     memctx, false);
451 	return status;
452 }
453 
454 /**
455  * dp_tx_tcl_desc_pool_free_rh() -  Free the tcl descriptor pool
456  * @soc: Handle to DP SoC structure
457  * @pool_id: pool to free
458  *
459  */
460 static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
461 {
462 	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
463 	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
464 	qdf_dma_context_t memctx = 0;
465 
466 	if (pool_id > MAX_TXDESC_POOLS - 1)
467 		return;
468 
469 	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
470 	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
471 
472 	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
473 				     &tcl_desc_pool->desc_pages,
474 				     memctx, false);
475 }
476 
477 /**
478  * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
479  *				   based on pool_id
480  * @soc: Handle to DP SoC structure
481  * @num_elem: Number of descriptor elements per pool
482  * @pool_id: pool to initialize
483  *
484  * Return: QDF_STATUS_SUCCESS
485  *	   QDF_STATUS_E_FAULT
486  */
487 static QDF_STATUS
488 dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
489 			    uint8_t pool_id)
490 {
491 	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
492 	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
493 	struct qdf_mem_dma_page_t *page_info;
494 	QDF_STATUS status;
495 
496 	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
497 	tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
498 	tcl_desc_pool->elem_count = num_elem;
499 
500 	/* Link tcl descriptors into a freelist */
501 	if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
502 				    tcl_desc_pool->elem_size,
503 				    tcl_desc_pool->elem_count,
504 				    false)) {
505 		dp_err("failed to link tcl desc Pages");
506 		status = QDF_STATUS_E_FAULT;
507 		goto err_link_fail;
508 	}
509 
510 	page_info = tcl_desc_pool->desc_pages.dma_pages;
511 	tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;
512 
513 	return QDF_STATUS_SUCCESS;
514 
515 err_link_fail:
516 	return status;
517 }
518 
519 /**
520  * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
521  *				     based on pool_id
522  * @soc: Handle to DP SoC structure
523  * @pool_id: pool to de-initialize
524  *
525  */
526 static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
527 {
528 }
529 
530 /**
531  * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
532  * @tcl_desc_pool: Tcl descriptor pool
533  * @tx_desc: SW TX descriptor
534  * @index: Index into the tcl descriptor pool
535  */
536 static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
537 				    struct dp_tx_desc_s *tx_desc,
538 				    uint32_t index)
539 {
540 	struct qdf_mem_dma_page_t *dma_page;
541 	uint32_t page_id;
542 	uint32_t offset;
543 
544 	tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;
545 
546 	if (tcl_desc_pool->freelist)
547 		tcl_desc_pool->freelist =
548 			*((uint32_t **)tcl_desc_pool->freelist);
549 
550 	page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
551 	offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
552 	dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];
553 
554 	tx_desc->tcl_cmd_paddr =
555 		dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
556 }
557 
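/*
 * dp_tx_desc_pool_init_rh() - Initialize the TCL, extension, TSO and TSO
 * num-seg pools for this pool_id, then walk the SW TX descriptor freelist
 * assigning descriptor IDs and attaching a TCL command buffer (virtual and
 * physical address) to every descriptor.
 */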
558 QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
559 				   uint32_t num_elem,
560 				   uint8_t pool_id,
561 				   bool spcl_tx_desc)
562 {
563 	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
564 	uint32_t id, count, page_id, offset, pool_id_32;
565 	struct dp_tx_desc_s *tx_desc;
566 	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
567 	struct dp_tx_desc_pool_s *tx_desc_pool;
568 	uint16_t num_desc_per_page;
569 	QDF_STATUS status;
570 
571 	status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
572 	if (QDF_IS_STATUS_ERROR(status)) {
573 		dp_err("failed to initialise tcl desc pool %d", pool_id);
574 		goto err_out;
575 	}
576 
577 	status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
578 	if (QDF_IS_STATUS_ERROR(status)) {
579 		dp_err("failed to initialise tx ext desc pool %d", pool_id);
580 		goto err_deinit_tcl_pool;
581 	}
582 
583 	status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
584 	if (QDF_IS_STATUS_ERROR(status)) {
585 		dp_err("failed to initialise tso desc pool %d", pool_id);
586 		goto err_deinit_tx_ext_pool;
587 	}
588 
589 	status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
590 	if (QDF_IS_STATUS_ERROR(status)) {
591 		dp_err("failed to initialise tso num seg pool %d", pool_id);
592 		goto err_deinit_tso_pool;
593 	}
594 
595 	tx_desc_pool = &soc->tx_desc[pool_id];
596 	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
597 	tx_desc = tx_desc_pool->freelist;
598 	count = 0;
599 	pool_id_32 = (uint32_t)pool_id;
600 	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
601 	while (tx_desc) {
602 		page_id = count / num_desc_per_page;
603 		offset = count % num_desc_per_page;
604 		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
605 			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);
606 
607 		tx_desc->id = id;
608 		tx_desc->pool_id = pool_id;
609 		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
610 		dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
611 		tx_desc = tx_desc->next;
612 		count++;
613 	}
614 
615 	return QDF_STATUS_SUCCESS;
616 
617 err_deinit_tso_pool:
618 	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
619 err_deinit_tx_ext_pool:
620 	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
621 err_deinit_tcl_pool:
622 	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
623 err_out:
624 	/* TODO: is assert needed ? */
625 	qdf_assert_always(0);
626 	return status;
627 }
628 
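/*
 * dp_tx_desc_pool_deinit_rh() - Undo dp_tx_desc_pool_init_rh() for the
 * given pool_id by releasing the TSO num-seg, TSO, extension and TCL
 * descriptor pools.
 */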
629 void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
630 			       struct dp_tx_desc_pool_s *tx_desc_pool,
631 			       uint8_t pool_id, bool spcl_tx_desc)
632 {
633 	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
634 	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
635 	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
636 	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
637 }
638 
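/*
 * dp_tx_compute_tx_delay_rh() - TX delay computation hook; not implemented
 * for this target, returns success without filling delay_us.
 */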
639 QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
640 				     struct dp_vdev *vdev,
641 				     struct hal_tx_completion_status *ts,
642 				     uint32_t *delay_us)
643 {
644 	return QDF_STATUS_SUCCESS;
645 }
646 
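/*
 * dp_tx_desc_pool_alloc_rh() - Allocate the TCL, extension, TSO and TSO
 * num-seg descriptor pools for pool_id, unwinding earlier allocations on
 * failure.
 */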
647 QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
648 				    uint8_t pool_id)
649 {
650 	QDF_STATUS status;
651 
652 	status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
653 	if (QDF_IS_STATUS_ERROR(status)) {
654 		dp_err("failed to allocate tcl desc pool %d", pool_id);
655 		goto err_tcl_desc_pool;
656 	}
657 
658 	status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
659 	if (QDF_IS_STATUS_ERROR(status)) {
660 		dp_err("failed to allocate tx ext desc pool %d", pool_id);
661 		goto err_free_tcl_pool;
662 	}
663 
664 	status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
665 	if (QDF_IS_STATUS_ERROR(status)) {
666 		dp_err("failed to allocate tso desc pool %d", pool_id);
667 		goto err_free_tx_ext_pool;
668 	}
669 
670 	status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
671 	if (QDF_IS_STATUS_ERROR(status)) {
672 		dp_err("failed to allocate tso num seg pool %d", pool_id);
673 		goto err_free_tso_pool;
674 	}
675 
676 	return status;
677 
678 err_free_tso_pool:
679 	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
680 err_free_tx_ext_pool:
681 	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
682 err_free_tcl_pool:
683 	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
684 err_tcl_desc_pool:
685 	/* TODO: is assert needed ? */
686 	qdf_assert_always(0);
687 	return status;
688 }
689 
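/*
 * dp_tx_desc_pool_free_rh() - Free the descriptor pools allocated in
 * dp_tx_desc_pool_alloc_rh().
 */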
690 void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
691 {
692 	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
693 	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
694 	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
695 	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
696 }
697 
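/*
 * dp_tx_compl_handler_rh() - Parse an HTT soft-UMAC TX completion message,
 * look up each SW TX descriptor by its sw_cookie, record the release
 * source, peer_id and TX status, and hand the chained descriptors to
 * dp_tx_comp_process_desc_list() on ring 0.
 */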
698 void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
699 {
700 	struct dp_tx_desc_s *tx_desc = NULL;
701 	struct dp_tx_desc_s *head_desc = NULL;
702 	struct dp_tx_desc_s *tail_desc = NULL;
703 	uint32_t sw_cookie;
704 	uint32_t num_msdus;
705 	uint32_t *msg_word;
706 	uint8_t ring_id;
707 	uint8_t tx_status;
708 	int i;
709 
710 	DP_HIST_INIT();
711 
712 	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
713 	num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
714 	msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;
715 
716 	for (i = 0; i < num_msdus; i++) {
717 		sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));
718 
719 		tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
720 		if (!tx_desc) {
721 			dp_err("failed to find tx desc");
722 			qdf_assert_always(0);
723 		}
724 
725 		/*
726 		 * If the descriptor is already freed in vdev_detach,
727 		 * continue to next descriptor
728 		 */
729 		if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
730 				 !tx_desc->flags)) {
731 			dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
732 					   tx_desc->id);
733 			DP_STATS_INC(soc, tx.tx_comp_exception, 1);
734 			dp_tx_desc_check_corruption(tx_desc);
735 			goto next_msdu;
736 		}
737 
738 		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
739 			dp_tx_comp_info_rl("pdev in down state %d",
740 					   tx_desc->id);
741 			tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
742 			dp_tx_comp_free_buf(soc, tx_desc, false);
743 			dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
744 			goto next_msdu;
745 		}
746 
747 		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
748 		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
749 			dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
750 					 tx_desc->flags, tx_desc->id);
751 			qdf_assert_always(0);
752 		}
753 
754 		if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
755 		    HTT_TX_MSDU_RELEASE_SOURCE_FW)
756 			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
757 		else
758 			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;
759 
760 		tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
761 		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));
762 
763 		tx_desc->tx_status =
764 			(tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
765 			 HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);
766 
767 		qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);
768 
769 		DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
770 
771 		/* First ring descriptor on the cycle */
772 		if (!head_desc) {
773 			head_desc = tx_desc;
774 			tail_desc = tx_desc;
775 		}
776 
777 		tail_desc->next = tx_desc;
778 		tx_desc->next = NULL;
779 		tail_desc = tx_desc;
780 next_msdu:
781 		msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
782 	}
783 
784 	/* For now, pass ring_id as 0 (zero) as WCN6450 only
785 	 * supports one TX ring.
786 	 */
787 	ring_id = 0;
788 
789 	if (head_desc)
790 		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
791 
792 	DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
793 	DP_TX_HIST_STATS_PER_PDEV();
794 }
795