xref: /wlan-driver/qcacld-3.0/core/dp/htt/htt_rx_ll.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_mem.h>         /* qdf_mem_malloc,free, etc. */
21 #include <qdf_types.h>          /* qdf_print, bool */
22 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
23 #include <qdf_timer.h>		/* qdf_timer_free */
24 
25 #include <htt.h>                /* HTT_HL_RX_DESC_SIZE */
26 #include <ol_cfg.h>
27 #include <ol_rx.h>
28 #include <ol_htt_rx_api.h>
29 #include <htt_internal.h>       /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
30 #include "regtable.h"
31 
32 #include <cds_ieee80211_common.h>   /* ieee80211_frame, ieee80211_qoscntl */
33 #include <cds_utils.h>
34 #include <wlan_policy_mgr_api.h>
35 #include "ol_txrx_types.h"
36 #ifdef DEBUG_DMA_DONE
37 #include <asm/barrier.h>
38 #include <wma_api.h>
39 #endif
40 #include <pktlog_ac_fmt.h>
41 #include <wlan_mlme_api.h>
42 
43 #ifdef DEBUG_DMA_DONE
44 #define MAX_DONE_BIT_CHECK_ITER 5
45 #endif
46 
47 #ifdef HTT_DEBUG_DATA
48 #define HTT_PKT_DUMP(x) x
49 #else
50 #define HTT_PKT_DUMP(x) /* no-op */
51 #endif
52 
53 /*--- setup / tear-down functions -------------------------------------------*/
54 
55 #ifndef HTT_RX_HOST_LATENCY_MAX_MS
56 #define HTT_RX_HOST_LATENCY_MAX_MS 20 /* ms */	/* very conservative */
57 #endif
58 
59  /* very conservative to ensure enough buffers are allocated */
60 #ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
61 #ifdef QCA_WIFI_3_0
62 #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 20
63 #else
64 #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
65 #endif
66 #endif
67 
68 #ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
69 #define HTT_RX_RING_REFILL_RETRY_TIME_MS    50
70 #endif
71 
72 #define RX_PADDR_MAGIC_PATTERN 0xDEAD0000
73 
74 #ifdef ENABLE_DEBUG_ADDRESS_MARKING
75 static qdf_dma_addr_t
76 htt_rx_paddr_mark_high_bits(qdf_dma_addr_t paddr)
77 {
78 	if (sizeof(qdf_dma_addr_t) > 4) {
79 		/* clear high bits, leave lower 37 bits (paddr) */
80 		paddr &= 0x01FFFFFFFFF;
81 		/* mark upper 16 bits of paddr */
82 		paddr |= (((uint64_t)RX_PADDR_MAGIC_PATTERN) << 32);
83 	}
84 	return paddr;
85 }
86 #else
87 static qdf_dma_addr_t
88 htt_rx_paddr_mark_high_bits(qdf_dma_addr_t paddr)
89 {
90 	return paddr;
91 }
92 #endif
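
/*
 * Worked example of the marking above (illustrative values only, assuming
 * sizeof(qdf_dma_addr_t) > 4 and ENABLE_DEBUG_ADDRESS_MARKING):
 *
 *   qdf_dma_addr_t paddr  = 0x0000001234567000ULL;  // hypothetical 37-bit paddr
 *   qdf_dma_addr_t marked = htt_rx_paddr_mark_high_bits(paddr);
 *   // marked == 0xDEAD001234567000ULL: the low 37 bits are preserved and
 *   // RX_PADDR_MAGIC_PATTERN is stamped into the upper 32 bits.
 */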
93 
94 /**
95  * htt_get_first_packet_after_wow_wakeup() - get first packet after wow wakeup
96  * @msg_word: pointer to rx indication message word
97  * @buf: pointer to buffer
98  *
99  * Return: None
100  */
101 static void
102 htt_get_first_packet_after_wow_wakeup(uint32_t *msg_word, qdf_nbuf_t buf)
103 {
104 	if (HTT_RX_IN_ORD_PADDR_IND_MSDU_INFO_GET(*msg_word) &
105 			FW_MSDU_INFO_FIRST_WAKEUP_M) {
106 		qdf_nbuf_mark_wakeup_frame(buf);
107 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
108 			  "%s: First packet after WOW Wakeup rcvd", __func__);
109 	}
110 }
111 
112 /**
113  * htt_rx_ring_smmu_mapped() - check if rx ring is smmu mapped or not
114  * @pdev: HTT pdev handle
115  *
116  * Return: true or false.
117  */
118 static inline bool htt_rx_ring_smmu_mapped(htt_pdev_handle pdev)
119 {
120 	if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
121 	    pdev->is_ipa_uc_enabled &&
122 	    pdev->rx_ring.smmu_map)
123 		return true;
124 	else
125 		return false;
126 }
127 
128 static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
129 {
130 	int idx;
131 	qdf_nbuf_t msdu;
132 
133 	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
134 
135 #ifdef DEBUG_DMA_DONE
136 	pdev->rx_ring.dbg_ring_idx++;
137 	pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
138 #endif
139 
140 	idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
141 	msdu = pdev->rx_ring.buf.netbufs_ring[idx];
142 	idx++;
143 	idx &= pdev->rx_ring.size_mask;
144 	pdev->rx_ring.sw_rd_idx.msdu_payld = idx;
145 	qdf_atomic_dec(&pdev->rx_ring.fill_cnt);
146 	return msdu;
147 }
148 
149 static inline unsigned int htt_rx_ring_elems(struct htt_pdev_t *pdev)
150 {
151 	return
152 		(*pdev->rx_ring.alloc_idx.vaddr -
153 		 pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
154 }
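
/*
 * Illustrative example of the ring-occupancy arithmetic above (hypothetical
 * numbers): with a ring of 2048 entries (size_mask == 0x7FF), an alloc index
 * of 100 and sw_rd_idx.msdu_payld == 2040, htt_rx_ring_elems() returns
 * (100 - 2040) & 0x7FF == 108; computing the index difference modulo the
 * ring size handles wrap-around for free.
 */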
155 
156 /**
157  * htt_rx_buff_pool_init() - initialize the pool of buffers
158  * @pdev: pointer to device
159  *
160  * Return: 0 - success, 1 - failure
161  */
162 static int htt_rx_buff_pool_init(struct htt_pdev_t *pdev)
163 {
164 	qdf_nbuf_t net_buf;
165 	int i;
166 
167 	pdev->rx_buff_pool.netbufs_ring =
168 		qdf_mem_malloc(HTT_RX_PRE_ALLOC_POOL_SIZE * sizeof(qdf_nbuf_t));
169 
170 	if (!pdev->rx_buff_pool.netbufs_ring)
171 		return 1; /* failure */
172 
173 	qdf_atomic_init(&pdev->rx_buff_pool.fill_cnt);
174 	qdf_atomic_init(&pdev->rx_buff_pool.refill_low_mem);
175 
176 	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
177 		net_buf = qdf_nbuf_alloc(pdev->osdev,
178 					 HTT_RX_BUF_SIZE,
179 					 0, 4, false);
180 		if (net_buf) {
181 			qdf_atomic_inc(&pdev->rx_buff_pool.fill_cnt);
182 			/*
183 			 * Mark this netbuf to differentiate it from
184 			 * other buffers: when set to 1, this buffer
185 			 * came from the pre-allocated pool.
186 			 */
187 			QDF_NBUF_CB_RX_PACKET_BUFF_POOL(net_buf) = 1;
188 		}
189 		/* Allow NULL to be inserted; it is handled when
190 		 * allocating from this pool.
191 		 */
192 		pdev->rx_buff_pool.netbufs_ring[i] = net_buf;
193 	}
194 	QDF_TRACE(QDF_MODULE_ID_HTT,
195 		  QDF_TRACE_LEVEL_INFO,
196 		  "max pool size %d pool filled %d",
197 		  HTT_RX_PRE_ALLOC_POOL_SIZE,
198 		  qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt));
199 
200 	qdf_spinlock_create(&pdev->rx_buff_pool.rx_buff_pool_lock);
201 	return 0;
202 }
203 
204 /**
205  * htt_rx_buff_pool_deinit() - deinitialize the pool of buffers
206  * @pdev: pointer to device
207  *
208  * Return: none
209  */
210 static void htt_rx_buff_pool_deinit(struct htt_pdev_t *pdev)
211 {
212 	qdf_nbuf_t net_buf;
213 	int i;
214 
215 	if (!pdev->rx_buff_pool.netbufs_ring)
216 		return;
217 
218 	qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
219 	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
220 		net_buf = pdev->rx_buff_pool.netbufs_ring[i];
221 		if (!net_buf)
222 			continue;
223 		qdf_nbuf_free(net_buf);
224 		qdf_atomic_dec(&pdev->rx_buff_pool.fill_cnt);
225 	}
226 	qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
227 	QDF_TRACE(QDF_MODULE_ID_HTT,
228 		  QDF_TRACE_LEVEL_INFO,
229 		  "max pool size %d pool filled %d",
230 		  HTT_RX_PRE_ALLOC_POOL_SIZE,
231 		  qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt));
232 
233 	qdf_mem_free(pdev->rx_buff_pool.netbufs_ring);
234 	qdf_spinlock_destroy(&pdev->rx_buff_pool.rx_buff_pool_lock);
235 }
236 
237 /**
238  * htt_rx_buff_pool_refill() - refill the pool with new buf or reuse same buf
239  * @pdev: pointer to device
240  * @netbuf: netbuf to reuse
241  *
242  * Return: true - if able to alloc new buf and insert into pool,
243  * false - if need to reuse the netbuf or not able to insert into pool
244  */
245 static bool htt_rx_buff_pool_refill(struct htt_pdev_t *pdev, qdf_nbuf_t netbuf)
246 {
247 	bool ret = false;
248 	qdf_nbuf_t net_buf;
249 	int i;
250 
251 	net_buf = qdf_nbuf_alloc(pdev->osdev,
252 				 HTT_RX_BUF_SIZE,
253 				 0, 4, false);
254 	if (net_buf) {
255 		/* able to alloc new net_buf.
256 		 * mark this netbuf as pool buf.
257 		 */
258 		QDF_NBUF_CB_RX_PACKET_BUFF_POOL(net_buf) = 1;
259 		ret = true;
260 	} else {
261 		/* reuse the netbuf and
262 		 * reset all fields of this netbuf.
263 		 */
264 		net_buf = netbuf;
265 		qdf_nbuf_reset(net_buf, 0, 4);
266 
267 		/* mark this netbuf as pool buf */
268 		QDF_NBUF_CB_RX_PACKET_BUFF_POOL(net_buf) = 1;
269 	}
270 
271 	qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
272 	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
273 		/* insert the netbuf in empty slot of pool */
274 		if (pdev->rx_buff_pool.netbufs_ring[i])
275 			continue;
276 
277 		pdev->rx_buff_pool.netbufs_ring[i] = net_buf;
278 		qdf_atomic_inc(&pdev->rx_buff_pool.fill_cnt);
279 		break;
280 	}
281 	qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
282 
283 	if (i == HTT_RX_PRE_ALLOC_POOL_SIZE) {
284 		/* fail to insert into pool, free net_buf */
285 		qdf_nbuf_free(net_buf);
286 		ret = false;
287 	}
288 
289 	return ret;
290 }
291 
292 /**
293  * htt_rx_buff_alloc() - alloc the net buf from the pool
294  * @pdev: pointer to device
295  *
296  * Return: nbuf or NULL
297  */
298 static qdf_nbuf_t htt_rx_buff_alloc(struct htt_pdev_t *pdev)
299 {
300 	qdf_nbuf_t net_buf = NULL;
301 	int i;
302 
303 	if (!pdev->rx_buff_pool.netbufs_ring)
304 		return net_buf;
305 
306 	qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
307 	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
308 		/* allocate the valid netbuf */
309 		if (!pdev->rx_buff_pool.netbufs_ring[i])
310 			continue;
311 
312 		net_buf = pdev->rx_buff_pool.netbufs_ring[i];
313 		qdf_atomic_dec(&pdev->rx_buff_pool.fill_cnt);
314 		pdev->rx_buff_pool.netbufs_ring[i] = NULL;
315 		break;
316 	}
317 	qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
318 	return net_buf;
319 }
320 
321 /**
322  * htt_rx_ring_buf_attach() - return net buf to attach in ring
323  * @pdev: pointer to device
324  *
325  * Return: nbuf or NULL
326  */
327 static qdf_nbuf_t htt_rx_ring_buf_attach(struct htt_pdev_t *pdev)
328 {
329 	qdf_nbuf_t net_buf = NULL;
330 	bool allocated = true;
331 
332 	net_buf =
333 		qdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
334 			       0, 4, false);
335 	if (!net_buf) {
336 		if (pdev->rx_buff_pool.netbufs_ring &&
337 		    qdf_atomic_read(&pdev->rx_buff_pool.refill_low_mem) &&
338 		    qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt))
339 			net_buf = htt_rx_buff_alloc(pdev);
340 
341 		allocated = false; /* not freshly allocated; taken from the pool */
342 	}
343 
344 	if (allocated || !qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt))
345 		qdf_atomic_set(&pdev->rx_buff_pool.refill_low_mem, 0);
346 
347 	return net_buf;
348 }
349 
350 /**
351  * htt_rx_ring_buff_free() - free the net buff or reuse it
352  * @pdev: pointer to device
353  * @netbuf: netbuf
354  *
355  * Return: none
356  */
357 static void htt_rx_ring_buff_free(struct htt_pdev_t *pdev, qdf_nbuf_t netbuf)
358 {
359 	bool status = false;
360 
361 	if (pdev->rx_buff_pool.netbufs_ring &&
362 	    QDF_NBUF_CB_RX_PACKET_BUFF_POOL(netbuf)) {
363 		int i;
364 
365 		/* reset this netbuf before putting it back into the pool */
366 		qdf_nbuf_reset(netbuf, 0, 4);
367 
368 		/* mark this netbuf as pool buf */
369 		QDF_NBUF_CB_RX_PACKET_BUFF_POOL(netbuf) = 1;
370 
371 		qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
372 		for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
373 			/* insert the netbuf in empty slot of pool */
374 			if (!pdev->rx_buff_pool.netbufs_ring[i]) {
375 				pdev->rx_buff_pool.netbufs_ring[i] = netbuf;
376 				qdf_atomic_inc(&pdev->rx_buff_pool.fill_cnt);
377 				status = true;    /* valid insertion */
378 				break;
379 			}
380 		}
381 		qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
382 	}
383 	if (!status)
384 		qdf_nbuf_free(netbuf);
385 }
386 
387 /* full_reorder_offload case: this function is called with lock held */
388 static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
389 {
390 	int idx;
391 	QDF_STATUS status;
392 	struct htt_host_rx_desc_base *rx_desc;
393 	int filled = 0;
394 	int debt_served = 0;
395 	qdf_mem_info_t mem_map_table = {0};
396 
397 	idx = *pdev->rx_ring.alloc_idx.vaddr;
398 
399 	if ((idx < 0) || (idx > pdev->rx_ring.size_mask) ||
400 	    (num > pdev->rx_ring.size))  {
401 		QDF_TRACE(QDF_MODULE_ID_HTT,
402 			  QDF_TRACE_LEVEL_ERROR,
403 			  "%s:rx refill failed!", __func__);
404 		return filled;
405 	}
406 
407 moretofill:
408 	while (num > 0) {
409 		qdf_dma_addr_t paddr, paddr_marked;
410 		qdf_nbuf_t rx_netbuf;
411 		int headroom;
412 
413 		rx_netbuf = htt_rx_ring_buf_attach(pdev);
414 		if (!rx_netbuf) {
415 			qdf_timer_stop(&pdev->rx_ring.
416 						 refill_retry_timer);
417 			/*
418 			 * Failed to fill it to the desired level -
419 			 * we'll start a timer and try again next time.
420 			 * As long as enough buffers are left in the ring for
421 			 * another A-MPDU rx, no special recovery is needed.
422 			 */
423 #ifdef DEBUG_DMA_DONE
424 			pdev->rx_ring.dbg_refill_cnt++;
425 #endif
426 			pdev->refill_retry_timer_starts++;
427 			qdf_timer_start(
428 				&pdev->rx_ring.refill_retry_timer,
429 				HTT_RX_RING_REFILL_RETRY_TIME_MS);
430 			goto update_alloc_idx;
431 		}
432 
433 		/* Clear rx_desc attention word before posting to Rx ring */
434 		rx_desc = htt_rx_desc(rx_netbuf);
435 		*(uint32_t *)&rx_desc->attention = 0;
436 
437 #ifdef DEBUG_DMA_DONE
438 		*(uint32_t *)&rx_desc->msdu_end = 1;
439 
440 #define MAGIC_PATTERN 0xDEADBEEF
441 		*(uint32_t *)&rx_desc->msdu_start = MAGIC_PATTERN;
442 
443 		/*
444 		 * Ensure that the attention bit is reset and msdu_end is set
445 		 * before calling dma_map.
446 		 */
447 		smp_mb();
448 #endif
449 		/*
450 		 * Adjust qdf_nbuf_data to point to the location in the buffer
451 		 * where the rx descriptor will be filled in.
452 		 */
453 		headroom = qdf_nbuf_data(rx_netbuf) - (uint8_t *)rx_desc;
454 		qdf_nbuf_push_head(rx_netbuf, headroom);
455 
456 #ifdef DEBUG_DMA_DONE
457 		status = qdf_nbuf_map(pdev->osdev, rx_netbuf,
458 				      QDF_DMA_BIDIRECTIONAL);
459 #else
460 		status = qdf_nbuf_map(pdev->osdev, rx_netbuf,
461 				      QDF_DMA_FROM_DEVICE);
462 #endif
463 		if (status != QDF_STATUS_SUCCESS) {
464 			htt_rx_ring_buff_free(pdev, rx_netbuf);
465 			goto update_alloc_idx;
466 		}
467 
468 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
469 		paddr_marked = htt_rx_paddr_mark_high_bits(paddr);
470 		if (pdev->cfg.is_full_reorder_offload) {
471 			if (qdf_unlikely(htt_rx_hash_list_insert(
472 					pdev, paddr_marked, rx_netbuf))) {
473 				QDF_TRACE(QDF_MODULE_ID_HTT,
474 					  QDF_TRACE_LEVEL_ERROR,
475 					  "%s: hash insert failed!", __func__);
476 #ifdef DEBUG_DMA_DONE
477 				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
478 					       QDF_DMA_BIDIRECTIONAL);
479 #else
480 				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
481 					       QDF_DMA_FROM_DEVICE);
482 #endif
483 				htt_rx_ring_buff_free(pdev, rx_netbuf);
484 
485 				goto update_alloc_idx;
486 			}
487 			htt_rx_dbg_rxbuf_set(pdev, paddr_marked, rx_netbuf);
488 		} else {
489 			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
490 		}
491 
492 		/* Caller already protected this function with refill_lock */
493 		if (qdf_nbuf_is_rx_ipa_smmu_map(rx_netbuf)) {
494 			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
495 						 paddr, HTT_RX_BUF_SIZE);
496 			qdf_assert_always(
497 				!cds_smmu_map_unmap(true, 1, &mem_map_table));
498 		}
499 
500 		pdev->rx_ring.buf.paddrs_ring[idx] = paddr_marked;
501 		qdf_atomic_inc(&pdev->rx_ring.fill_cnt);
502 
503 		num--;
504 		idx++;
505 		filled++;
506 		idx &= pdev->rx_ring.size_mask;
507 	}
508 
509 	if (debt_served <  qdf_atomic_read(&pdev->rx_ring.refill_debt)) {
510 		num = qdf_atomic_read(&pdev->rx_ring.refill_debt) - debt_served;
511 		debt_served += num;
512 		goto moretofill;
513 	}
514 
515 update_alloc_idx:
516 	/*
517 	 * Make sure alloc index write is reflected correctly before FW polls
518 	 * remote ring write index as compiler can reorder the instructions
519 	 * based on optimizations.
520 	 */
521 	qdf_mb();
522 	*pdev->rx_ring.alloc_idx.vaddr = idx;
523 	htt_rx_dbg_rxbuf_indupd(pdev, idx);
524 
525 	return filled;
526 }
527 
528 static int htt_rx_ring_size(struct htt_pdev_t *pdev)
529 {
530 	int size;
531 	QDF_STATUS status;
532 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
533 	bool enable_2x2 = true;
534 
535 	/*
536 	 * It is expected that the host CPU will typically be able to service
537 	 * the rx indication from one A-MPDU before the rx indication from
538 	 * the subsequent A-MPDU happens, roughly 1-2 ms later.
539 	 * However, the rx ring should be sized very conservatively, to
540 	 * accommodate the worst reasonable delay before the host CPU services
541 	 * a rx indication interrupt.
542 	 * The rx ring need not be kept full of empty buffers.  In theory,
543 	 * the htt host SW can dynamically track the low-water mark in the
544 	 * rx ring, and dynamically adjust the level to which the rx ring
545 	 * is filled with empty buffers, to dynamically meet the desired
546 	 * low-water mark.
547 	 * In contrast, it's difficult to resize the rx ring itself, once
548 	 * it's in use.
549 	 * Thus, the ring itself should be sized very conservatively, while
550 	 * the degree to which the ring is filled with empty buffers should
551 	 * be sized moderately conservatively.
552 	 */
553 	size =
554 		ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
555 		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
556 		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
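
	/*
	 * Worked example of the sizing formula (illustrative numbers only,
	 * assuming HTT_RX_AVG_FRM_BYTES == 1000): for a configured maximum
	 * throughput of 800 Mbps,
	 *   800 * 1000 / (8 * 1000) * 20 = 2000 buffers,
	 * which, provided it lies within the min/max bounds below, is rounded
	 * up by qdf_get_pwr2() to a ring size of 2048.
	 */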
557 
558 	if (size < HTT_RX_RING_SIZE_MIN)
559 		size = HTT_RX_RING_SIZE_MIN;
560 	else if (size > HTT_RX_RING_SIZE_MAX)
561 		size = HTT_RX_RING_SIZE_MAX;
562 
563 	size = qdf_get_pwr2(size);
564 
565 	if (!soc) {
566 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
567 		  "Unable to get 2x2 cap, soc is NULL; ring size:%u selected", size);
568 		return size;
569 	}
570 
571 	status = wlan_mlme_get_vht_enable2x2((void *)soc->psoc, &enable_2x2);
572 	if (QDF_IS_STATUS_SUCCESS(status))
573 		size = (enable_2x2) ? size : QDF_MIN(size, HTT_RX_RING_SIZE_1x1);
574 	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
575 		  "HTT RX refill ring size:%u selected for %s mode", size, enable_2x2 ? "2x2" : "1x1");
576 
577 	return size;
578 }
579 
580 static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
581 {
582 	int size;
583 
584 	size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
585 		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
586 		(8 * HTT_RX_AVG_FRM_BYTES) *
587 		HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
588 
589 	size = qdf_get_pwr2(size);
590 	/*
591 	 * Make sure the fill level is at least 1 less than the ring size.
592 	 * Leaving 1 element empty allows the SW to easily distinguish
593 	 * between a full ring vs. an empty ring.
594 	 */
595 	if (size >= pdev->rx_ring.size)
596 		size = pdev->rx_ring.size - 1;
597 
598 	return size;
599 }
600 
601 static void htt_rx_ring_refill_retry(void *arg)
602 {
603 	htt_pdev_handle pdev = (htt_pdev_handle)arg;
604 	int filled = 0;
605 	int num;
606 
607 	pdev->refill_retry_timer_calls++;
608 	qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
609 
610 	num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
611 	qdf_atomic_sub(num, &pdev->rx_ring.refill_debt);
612 
613 	qdf_atomic_set(&pdev->rx_buff_pool.refill_low_mem, 1);
614 
615 	filled = htt_rx_ring_fill_n(pdev, num);
616 
617 	if (filled > num) {
618 		/* we served ourselves and some other debt */
619 		/* sub is safer than  = 0 */
620 		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
621 	} else if (num == filled) { /* nothing to be done */
622 	} else {
623 		qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
624 		/* we could not fill all, timer must have been started */
625 		pdev->refill_retry_timer_doubles++;
626 	}
627 	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
628 }
629 
630 /*--- rx descriptor field access functions ----------------------------------*/
631 /*
632  * These functions need to use bit masks and shifts to extract fields
633  * from the rx descriptors, rather than directly using the bitfields.
634  * For example, use
635  *     (desc & FIELD_MASK) >> FIELD_LSB
636  * rather than
637  *     desc.field
638  * This allows the functions to work correctly on either little-endian
639  * machines (no endianness conversion needed) or big-endian machines
640  * (endianness conversion provided automatically by the HW DMA's
641  * byte-swizzling).
642  */
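
/*
 * Illustrative sketch of the mask/shift pattern (hypothetical field layout,
 * not an actual descriptor field): if a field occupied bits 7:4 of a
 * descriptor dword, it would be extracted as
 *
 *     val = ((*(uint32_t *)&desc_word) & 0xF0) >> 4;
 *
 * instead of reading a C bitfield, so the code is independent of the
 * compiler's bitfield layout.
 */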
643 
644 #ifdef CHECKSUM_OFFLOAD
645 static inline void
646 htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
647 			   struct htt_host_rx_desc_base *rx_desc)
648 {
649 #define MAX_IP_VER          2
650 #define MAX_PROTO_VAL       4
651 	struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
652 	unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);
653 
654 	/*
655 	 * HW supports TCP & UDP checksum offload for ipv4 and ipv6
656 	 */
657 	static const qdf_nbuf_l4_rx_cksum_type_t
658 		cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
659 		{
660 			/* non-fragmented IP packet */
661 			/* non TCP/UDP packet */
662 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
663 			/* TCP packet */
664 			{QDF_NBUF_RX_CKSUM_TCP, QDF_NBUF_RX_CKSUM_TCPIPV6},
665 			/* UDP packet */
666 			{QDF_NBUF_RX_CKSUM_UDP, QDF_NBUF_RX_CKSUM_UDPIPV6},
667 			/* invalid packet type */
668 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
669 		},
670 		{
671 			/* fragmented IP packet */
672 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
673 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
674 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
675 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
676 		}
677 	};
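
	/*
	 * Example of the table lookup below (illustrative case): a
	 * non-fragmented IPv4 TCP MSDU has ip_frag == 0, tcp_proto == 1,
	 * udp_proto == 0 and ipv6_proto == 0, so proto == 1 and
	 * cksum_table[0][1][0] == QDF_NBUF_RX_CKSUM_TCP is selected.
	 */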
678 
679 	qdf_nbuf_rx_cksum_t cksum = {
680 		cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
681 		QDF_NBUF_RX_CKSUM_NONE,
682 		0
683 	};
684 
685 	if (cksum.l4_type !=
686 	    (qdf_nbuf_l4_rx_cksum_type_t)QDF_NBUF_RX_CKSUM_NONE) {
687 		cksum.l4_result =
688 			((*(uint32_t *)&rx_desc->attention) &
689 			 RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
690 			QDF_NBUF_RX_CKSUM_NONE :
691 			QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
692 	}
693 	qdf_nbuf_set_rx_cksum(msdu, &cksum);
694 #undef MAX_IP_VER
695 #undef MAX_PROTO_VAL
696 }
697 
698 #else
699 
700 static inline
701 void htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
702 				struct htt_host_rx_desc_base *rx_desc)
703 {
704 }
705 
706 #endif
707 
708 static void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu)
709 {
710 	return htt_rx_desc(msdu);
711 }
712 
713 static bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
714 {
715 	struct htt_host_rx_desc_base *rx_desc =
716 		(struct htt_host_rx_desc_base *)mpdu_desc;
717 
718 	return (((*((uint32_t *)&rx_desc->mpdu_start)) &
719 		 RX_MPDU_START_0_ENCRYPTED_MASK) >>
720 		RX_MPDU_START_0_ENCRYPTED_LSB) ? true : false;
721 }
722 
723 static
724 bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
725 {
726 	return false;
727 }
728 
729 static bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
730 				       struct ol_txrx_peer_t *peer,
731 				       void *mpdu_desc,
732 				       uint16_t *primary_chan_center_freq_mhz,
733 				       uint16_t *contig_chan1_center_freq_mhz,
734 				       uint16_t *contig_chan2_center_freq_mhz,
735 				       uint8_t *phy_mode)
736 {
737 	if (primary_chan_center_freq_mhz)
738 		*primary_chan_center_freq_mhz = 0;
739 	if (contig_chan1_center_freq_mhz)
740 		*contig_chan1_center_freq_mhz = 0;
741 	if (contig_chan2_center_freq_mhz)
742 		*contig_chan2_center_freq_mhz = 0;
743 	if (phy_mode)
744 		*phy_mode = 0;
745 	return false;
746 }
747 
748 static bool
749 htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
750 {
751 	struct htt_host_rx_desc_base *rx_desc =
752 		(struct htt_host_rx_desc_base *)msdu_desc;
753 	return (bool)
754 		(((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
755 		  RX_MSDU_END_4_FIRST_MSDU_MASK) >>
756 		 RX_MSDU_END_4_FIRST_MSDU_LSB);
757 }
758 
759 static bool
760 htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
761 			   uint8_t *key_id)
762 {
763 	struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
764 						mpdu_desc;
765 
766 	if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
767 		return false;
768 
769 	*key_id = ((*(((uint32_t *)&rx_desc->msdu_end) + 1)) &
770 		   (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
771 		    RX_MSDU_END_1_KEY_ID_OCT_LSB));
772 
773 	return true;
774 }
775 
776 /**
777  * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
778  *                               for the Low Latency driver
779  * @pdev:                          Handle (pointer) to HTT pdev.
780  * @mpdu_desc:                     Void pointer to the Rx descriptor for MPDU
781  *                                 before the beginning of the payload.
782  *
783  *  This function returns the retry bit of the 802.11 header for the
784  *  provided rx MPDU descriptor.
785  *
786  * Return:        boolean -- true if retry is set, false otherwise
787  */
788 static bool
789 htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
790 {
791 	struct htt_host_rx_desc_base *rx_desc =
792 		(struct htt_host_rx_desc_base *)mpdu_desc;
793 
794 	return
795 		(bool)(((*((uint32_t *)&rx_desc->mpdu_start)) &
796 		RX_MPDU_START_0_RETRY_MASK) >>
797 		RX_MPDU_START_0_RETRY_LSB);
798 }
799 
800 static uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev,
801 					    void *mpdu_desc,
802 					    bool update_seq_num)
803 {
804 	struct htt_host_rx_desc_base *rx_desc =
805 		(struct htt_host_rx_desc_base *)mpdu_desc;
806 
807 	return
808 		(uint16_t)(((*((uint32_t *)&rx_desc->mpdu_start)) &
809 			     RX_MPDU_START_0_SEQ_NUM_MASK) >>
810 			    RX_MPDU_START_0_SEQ_NUM_LSB);
811 }
812 
813 static void
814 htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
815 		       void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
816 {
817 	struct htt_host_rx_desc_base *rx_desc =
818 		(struct htt_host_rx_desc_base *)mpdu_desc;
819 
820 	switch (pn_len_bits) {
821 	case 24:
822 		/* bits 23:0 */
823 		pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
824 		break;
825 	case 48:
826 		/* bits 31:0 */
827 		pn->pn48 = rx_desc->mpdu_start.pn_31_0;
828 		/* bits 47:32 */
829 		pn->pn48 |= ((uint64_t)
830 			     ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
831 			      & RX_MPDU_START_2_PN_47_32_MASK))
832 			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
833 		break;
834 	case 128:
835 		/* bits 31:0 */
836 		pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
837 		/* bits 47:32 */
838 		pn->pn128[0] |=
839 			((uint64_t)((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
840 				     & RX_MPDU_START_2_PN_47_32_MASK))
841 			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
842 		/* bits 63:48 */
843 		pn->pn128[0] |=
844 			((uint64_t)((*(((uint32_t *)&rx_desc->msdu_end) + 2))
845 				     & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
846 			<< (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
847 		/* bits 95:64 */
848 		pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
849 		/* bits 127:96 */
850 		pn->pn128[1] |=
851 			((uint64_t)rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
852 		break;
853 	default:
854 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
855 			  "Error: invalid length spec (%d bits) for PN",
856 			  pn_len_bits);
857 	}
858 }
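
/*
 * Illustrative 48-bit PN case for the function above (hypothetical values,
 * assuming the pn_47_32 field occupies the low bits of its dword so the
 * shift places it at bits 47:32): with mpdu_start.pn_31_0 == 0x89ABCDEF and
 * pn_47_32 == 0x0123, the assembled pn->pn48 is 0x012389ABCDEFULL.
 */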
859 
860 /**
861  * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
862  *                             for Low Latency driver
863  * @pdev:                        Handle (pointer) to HTT pdev.
864  * @mpdu_desc:                   Void pointer to the Rx descriptor for the MPDU
865  *                               before the beginning of the payload.
866  *
867  * This function returns the TID set in the 802.11 QoS Control for the MPDU
868  * in the packet header, by looking at the mpdu_start of the Rx descriptor.
869  * Rx descriptor gets a copy of the TID from the MAC.
870  *
871  * Return:        Actual TID set in the packet header.
872  */
873 static uint8_t
874 htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
875 {
876 	struct htt_host_rx_desc_base *rx_desc =
877 		(struct htt_host_rx_desc_base *)mpdu_desc;
878 
879 	return
880 		(uint8_t)(((*(((uint32_t *)&rx_desc->mpdu_start) + 2)) &
881 		RX_MPDU_START_2_TID_MASK) >>
882 		RX_MPDU_START_2_TID_LSB);
883 }
884 
885 static bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev,
886 					       void *msdu_desc)
887 {
888 	struct htt_host_rx_desc_base *rx_desc =
889 		(struct htt_host_rx_desc_base *)msdu_desc;
890 	return (bool)
891 		(((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
892 		  RX_MSDU_END_4_LAST_MSDU_MASK) >> RX_MSDU_END_4_LAST_MSDU_LSB);
893 }
894 
895 static int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev,
896 					      void *msdu_desc)
897 {
898 	struct htt_host_rx_desc_base *rx_desc =
899 		(struct htt_host_rx_desc_base *)msdu_desc;
900 	/*
901 	 * HW rx desc: the mcast_bcast flag is only valid
902 	 * if first_msdu is set
903 	 */
904 	return ((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
905 		RX_MSDU_END_4_FIRST_MSDU_MASK) >> RX_MSDU_END_4_FIRST_MSDU_LSB;
906 }
907 
908 static bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
909 {
910 	struct htt_host_rx_desc_base *rx_desc =
911 		(struct htt_host_rx_desc_base *)msdu_desc;
912 	return ((*((uint32_t *)&rx_desc->attention)) &
913 		RX_ATTENTION_0_MCAST_BCAST_MASK)
914 		>> RX_ATTENTION_0_MCAST_BCAST_LSB;
915 }
916 
917 static int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
918 {
919 	struct htt_host_rx_desc_base *rx_desc =
920 		(struct htt_host_rx_desc_base *)msdu_desc;
921 	return ((*((uint32_t *)&rx_desc->attention)) &
922 		 RX_ATTENTION_0_FRAGMENT_MASK) >> RX_ATTENTION_0_FRAGMENT_LSB;
923 }
924 
925 static inline int
926 htt_rx_offload_msdu_cnt_ll(htt_pdev_handle pdev)
927 {
928 	return htt_rx_ring_elems(pdev);
929 }
930 
931 static int
932 htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
933 			   qdf_nbuf_t offload_deliver_msg,
934 			   int *vdev_id,
935 			   int *peer_id,
936 			   int *tid,
937 			   uint8_t *fw_desc,
938 			   qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
939 {
940 	qdf_nbuf_t buf;
941 	uint32_t *msdu_hdr, msdu_len;
942 
943 	*head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
944 
945 	if (qdf_unlikely(!buf)) {
946 		qdf_print("netbuf pop failed!");
947 		return 1;
948 	}
949 
950 	/* Fake read mpdu_desc to keep desc ptr in sync */
951 	htt_rx_mpdu_desc_list_next(pdev, NULL);
952 	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
953 #ifdef DEBUG_DMA_DONE
954 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
955 #else
956 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
957 #endif
958 	msdu_hdr = (uint32_t *)qdf_nbuf_data(buf);
959 
960 	/* First dword */
961 	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
962 	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
963 
964 	/* Second dword */
965 	msdu_hdr++;
966 	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
967 	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
968 	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
969 
970 	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
971 	qdf_nbuf_set_pktlen(buf, msdu_len);
972 	return 0;
973 }
974 
975 int
976 htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
977 				 uint32_t *msg_word,
978 				 int msdu_iter,
979 				 int *vdev_id,
980 				 int *peer_id,
981 				 int *tid,
982 				 uint8_t *fw_desc,
983 				 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
984 {
985 	qdf_nbuf_t buf;
986 	uint32_t *msdu_hdr, msdu_len;
987 	uint32_t *curr_msdu;
988 	qdf_dma_addr_t paddr;
989 
990 	curr_msdu =
991 		msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
992 	paddr = htt_rx_in_ord_paddr_get(curr_msdu);
993 	*head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);
994 
995 	if (qdf_unlikely(!buf)) {
996 		qdf_print("netbuf pop failed!");
997 		return 1;
998 	}
999 	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
1000 #ifdef DEBUG_DMA_DONE
1001 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
1002 #else
1003 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
1004 #endif
1005 
1006 	if (pdev->cfg.is_first_wakeup_packet)
1007 		htt_get_first_packet_after_wow_wakeup(
1008 			msg_word + NEXT_FIELD_OFFSET_IN32, buf);
1009 
1010 	msdu_hdr = (uint32_t *)qdf_nbuf_data(buf);
1011 
1012 	/* First dword */
1013 	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
1014 	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
1015 
1016 	/* Second dword */
1017 	msdu_hdr++;
1018 	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
1019 	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
1020 	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
1021 
1022 	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
1023 	qdf_nbuf_set_pktlen(buf, msdu_len);
1024 	return 0;
1025 }
1026 
1027 #ifdef WLAN_FULL_REORDER_OFFLOAD
1028 
1029 /* Number of buckets in the hash table */
1030 #define RX_NUM_HASH_BUCKETS 1024        /* This should always be a power of 2 */
1031 #define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)
1032 
1033 /* Number of hash entries allocated per bucket */
1034 #define RX_ENTRIES_SIZE 10
1035 
1036 #define RX_HASH_FUNCTION(a) \
1037 	((((a) >> 14) ^ ((a) >> 4)) & RX_NUM_HASH_BUCKETS_MASK)
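
/*
 * Example of the bucket computation (illustrative address): for a trimmed
 * paddr of 0x10000,
 *   ((0x10000 >> 14) ^ (0x10000 >> 4)) & 0x3FF == (0x4 ^ 0x1000) & 0x3FF == 4,
 * so the buffer lands in bucket 4.
 */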
1038 
1039 #ifdef RX_HASH_DEBUG_LOG
1040 #define RX_HASH_LOG(x) x
1041 #else
1042 #define RX_HASH_LOG(x)          /* no-op */
1043 #endif
1044 
1045 /* Return values: 1 - success, 0 - failure */
1046 #define RX_DESC_DISCARD_IS_SET ((*((u_int8_t *)&rx_desc->fw_desc.u.val)) & \
1047 							FW_RX_DESC_DISCARD_M)
1048 #define RX_DESC_MIC_ERR_IS_SET ((*((u_int8_t *)&rx_desc->fw_desc.u.val)) & \
1049 							FW_RX_DESC_ANY_ERR_M)
1050 
1051 #define RX_RING_REFILL_DEBT_MAX 128
1052 
1053 /* Initializes the circular linked list */
1054 static inline void htt_list_init(struct htt_list_node *head)
1055 {
1056 	head->prev = head;
1057 	head->next = head;
1058 }
1059 
1060 /* Adds entry to the end of the linked list */
1061 static inline void htt_list_add_tail(struct htt_list_node *head,
1062 				     struct htt_list_node *node)
1063 {
1064 	head->prev->next = node;
1065 	node->prev = head->prev;
1066 	node->next = head;
1067 	head->prev = node;
1068 }
1069 
1070 /* Removes the entry corresponding to the input node from the linked list */
1071 static inline void htt_list_remove(struct htt_list_node *node)
1072 {
1073 	node->prev->next = node->next;
1074 	node->next->prev = node->prev;
1075 }
1076 
1077 /* Helper macro to iterate through the linked list */
1078 #define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next;		\
1079 					   (iter) != (head);		\
1080 					   (iter) = (iter)->next)	\
1081 
1082 #ifdef RX_HASH_DEBUG
1083 /* Hash cookie related macros */
1084 #define HTT_RX_HASH_COOKIE 0xDEED
1085 
1086 #define HTT_RX_HASH_COOKIE_SET(hash_element) \
1087 	((hash_element)->cookie = HTT_RX_HASH_COOKIE)
1088 
1089 #define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
1090 	HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
1091 
1092 /* Hash count related macros */
1093 #define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
1094 	((hash_bucket)->count++)
1095 
1096 #define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
1097 	((hash_bucket)->count--)
1098 
1099 #define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket)->count = 0)
1100 
1101 #define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
1102 	RX_HASH_LOG(qdf_print(" count %d\n", (hash_bucket)->count))
1103 #else                           /* RX_HASH_DEBUG */
1104 /* Hash cookie related macros */
1105 #define HTT_RX_HASH_COOKIE_SET(hash_element)    /* no-op */
1106 #define HTT_RX_HASH_COOKIE_CHECK(hash_element)  /* no-op */
1107 /* Hash count related macros */
1108 #define HTT_RX_HASH_COUNT_INCR(hash_bucket)     /* no-op */
1109 #define HTT_RX_HASH_COUNT_DECR(hash_bucket)     /* no-op */
1110 #define HTT_RX_HASH_COUNT_PRINT(hash_bucket)    /* no-op */
1111 #define HTT_RX_HASH_COUNT_RESET(hash_bucket)    /* no-op */
1112 #endif /* RX_HASH_DEBUG */
1113 
1114 /*
1115  * Inserts the given "physical address - network buffer" pair into the
1116  * hash table for the given pdev. This function will do the following:
1117  * 1. Determine which bucket to insert the pair into
1118  * 2. First try to allocate the hash entry for this pair from the pre-allocated
1119  *    entries list
1120  * 3. If there are no more entries in the pre-allocated entries list, allocate
1121  *    the hash entry from the hash memory pool
1122  * Note: this function is not thread-safe
1123  * Returns 0 - success, 1 - failure
1124  */
1125 int
1126 htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
1127 			qdf_dma_addr_t paddr,
1128 			qdf_nbuf_t netbuf)
1129 {
1130 	int i;
1131 	int rc = 0;
1132 	struct htt_rx_hash_entry *hash_element = NULL;
1133 
1134 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1135 
1136 	/* get rid of the marking bits if they are available */
1137 	paddr = htt_paddr_trim_to_37(paddr);
1138 
1139 	i = RX_HASH_FUNCTION(paddr);
1140 
1141 	/* Check if there are any entries in the pre-allocated free list */
1142 	if (pdev->rx_ring.hash_table[i]->freepool.next !=
1143 	    &pdev->rx_ring.hash_table[i]->freepool) {
1144 		hash_element =
1145 			(struct htt_rx_hash_entry *)(
1146 				(char *)
1147 				pdev->rx_ring.hash_table[i]->freepool.next -
1148 				pdev->rx_ring.listnode_offset);
1149 		if (qdf_unlikely(!hash_element)) {
1150 			HTT_ASSERT_ALWAYS(0);
1151 			rc = 1;
1152 			goto hli_end;
1153 		}
1154 
1155 		htt_list_remove(pdev->rx_ring.hash_table[i]->freepool.next);
1156 	} else {
1157 		hash_element = qdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
1158 		if (qdf_unlikely(!hash_element)) {
1159 			HTT_ASSERT_ALWAYS(0);
1160 			rc = 1;
1161 			goto hli_end;
1162 		}
1163 		hash_element->fromlist = 0;
1164 	}
1165 
1166 	hash_element->netbuf = netbuf;
1167 	hash_element->paddr = paddr;
1168 	HTT_RX_HASH_COOKIE_SET(hash_element);
1169 
1170 	htt_list_add_tail(&pdev->rx_ring.hash_table[i]->listhead,
1171 			  &hash_element->listnode);
1172 
1173 	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%x netbuf %pK bucket %d\n",
1174 			      paddr, netbuf, (int)i));
1175 
1176 	if (htt_rx_ring_smmu_mapped(pdev)) {
1177 		if (qdf_unlikely(qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
1178 			qdf_err("Already smmu mapped, nbuf: %pK",
1179 				netbuf);
1180 			qdf_assert_always(0);
1181 		}
1182 		qdf_nbuf_set_rx_ipa_smmu_map(netbuf, true);
1183 	}
1184 
1185 	HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
1186 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
1187 
1188 hli_end:
1189 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1190 	return rc;
1191 }
1192 
1193 /*
1194  * Given a physical address this function will find the corresponding network
1195  *  buffer from the hash table.
1196  *  paddr is already stripped off of higher marking bits.
1197  */
1198 qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev,
1199 				   qdf_dma_addr_t     paddr)
1200 {
1201 	uint32_t i;
1202 	struct htt_list_node *list_iter = NULL;
1203 	qdf_nbuf_t netbuf = NULL;
1204 	struct htt_rx_hash_entry *hash_entry;
1205 
1206 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1207 
1208 	if (!pdev->rx_ring.hash_table) {
1209 		qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1210 		return NULL;
1211 	}
1212 
1213 	i = RX_HASH_FUNCTION(paddr);
1214 
1215 	HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i]->listhead) {
1216 		hash_entry = (struct htt_rx_hash_entry *)
1217 			     ((char *)list_iter -
1218 			      pdev->rx_ring.listnode_offset);
1219 
1220 		HTT_RX_HASH_COOKIE_CHECK(hash_entry);
1221 
1222 		if (hash_entry->paddr == paddr) {
1223 			/* Found the entry corresponding to paddr */
1224 			netbuf = hash_entry->netbuf;
1225 			/* set netbuf to NULL to trace if freed entry
1226 			/* clear netbuf so that a freed entry being
1227 			 * unmapped again in hash deinit can be traced.
1228 			hash_entry->netbuf = NULL;
1229 			htt_list_remove(&hash_entry->listnode);
1230 			HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
1231 			/*
1232 			 * if the rx entry is from the pre-allocated list,
1233 			 * return it
1234 			 */
1235 			if (hash_entry->fromlist)
1236 				htt_list_add_tail(
1237 					&pdev->rx_ring.hash_table[i]->freepool,
1238 					&hash_entry->listnode);
1239 			else
1240 				qdf_mem_free(hash_entry);
1241 
1242 			htt_rx_dbg_rxbuf_reset(pdev, netbuf);
1243 			break;
1244 		}
1245 	}
1246 
1247 	if (netbuf && htt_rx_ring_smmu_mapped(pdev)) {
1248 		if (qdf_unlikely(!qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
1249 			qdf_err("smmu not mapped nbuf: %pK", netbuf);
1250 			qdf_assert_always(0);
1251 		}
1252 	}
1253 
1254 	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%llx, netbuf %pK, bucket %d\n",
1255 			      (unsigned long long)paddr, netbuf, (int)i));
1256 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
1257 
1258 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1259 
1260 	if (!netbuf) {
1261 		qdf_print("rx hash: no entry found for %llx!\n",
1262 			  (unsigned long long)paddr);
1263 		cds_trigger_recovery(QDF_RX_HASH_NO_ENTRY_FOUND);
1264 	}
1265 
1266 	return netbuf;
1267 }
1268 
1269 /*
1270  * Initialization function of the rx buffer hash table. This function will
1271  * allocate a hash table of a certain pre-determined size and initialize all
1272  * the elements
1273  */
1274 static int htt_rx_hash_init(struct htt_pdev_t *pdev)
1275 {
1276 	int i, j;
1277 	int rc = 0;
1278 	void *allocation;
1279 
1280 	HTT_ASSERT2(QDF_IS_PWR2(RX_NUM_HASH_BUCKETS));
1281 
1282 	/* hash table is array of bucket pointers */
1283 	pdev->rx_ring.hash_table =
1284 		qdf_mem_malloc(RX_NUM_HASH_BUCKETS *
1285 			       sizeof(struct htt_rx_hash_bucket *));
1286 
1287 	if (!pdev->rx_ring.hash_table)
1288 		return 1;
1289 
1290 	qdf_spinlock_create(&pdev->rx_ring.rx_hash_lock);
1291 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1292 
1293 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
1294 		qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1295 		/* pre-allocate bucket and pool of entries for this bucket */
1296 		allocation = qdf_mem_malloc((sizeof(struct htt_rx_hash_bucket) +
1297 			(RX_ENTRIES_SIZE * sizeof(struct htt_rx_hash_entry))));
1298 		qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1299 		pdev->rx_ring.hash_table[i] = allocation;
1300 
1301 		HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);
1302 
1303 		/* initialize the hash table buckets */
1304 		htt_list_init(&pdev->rx_ring.hash_table[i]->listhead);
1305 
1306 		/* initialize the hash table free pool per bucket */
1307 		htt_list_init(&pdev->rx_ring.hash_table[i]->freepool);
1308 
1309 		/* pre-allocate a pool of entries for this bucket */
1310 		pdev->rx_ring.hash_table[i]->entries =
1311 			(struct htt_rx_hash_entry *)
1312 			((uint8_t *)pdev->rx_ring.hash_table[i] +
1313 			sizeof(struct htt_rx_hash_bucket));
1314 
1315 		if (!pdev->rx_ring.hash_table[i]->entries) {
1316 			qdf_print("rx hash bucket %d entries alloc failed\n",
1317 				  (int)i);
1318 			while (i) {
1319 				i--;
1320 				qdf_mem_free(pdev->rx_ring.hash_table[i]);
1321 			}
1322 			qdf_mem_free(pdev->rx_ring.hash_table);
1323 			pdev->rx_ring.hash_table = NULL;
1324 			rc = 1;
1325 			goto hi_end;
1326 		}
1327 
1328 		/* initialize the free list with pre-allocated entries */
1329 		for (j = 0; j < RX_ENTRIES_SIZE; j++) {
1330 			pdev->rx_ring.hash_table[i]->entries[j].fromlist = 1;
1331 			htt_list_add_tail(
1332 				&pdev->rx_ring.hash_table[i]->freepool,
1333 				&pdev->rx_ring.hash_table[i]->entries[j].
1334 				listnode);
1335 		}
1336 	}
1337 
1338 	pdev->rx_ring.listnode_offset =
1339 		qdf_offsetof(struct htt_rx_hash_entry, listnode);
1340 hi_end:
1341 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1342 
1343 	return rc;
1344 }
1345 
1346 /* De-initialization function of the rx buffer hash table. This function will
1347  * free up the hash table, which includes freeing all the pending rx buffers.
1348  */
1349 static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
1350 {
1351 	uint32_t i;
1352 	struct htt_rx_hash_entry *hash_entry;
1353 	struct htt_rx_hash_bucket **hash_table;
1354 	struct htt_list_node *list_iter = NULL;
1355 	qdf_mem_info_t mem_map_table = {0};
1356 	bool ipa_smmu = false;
1357 
1358 	if (!pdev->rx_ring.hash_table)
1359 		return;
1360 
1361 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1362 	ipa_smmu = htt_rx_ring_smmu_mapped(pdev);
1363 	hash_table = pdev->rx_ring.hash_table;
1364 	pdev->rx_ring.hash_table = NULL;
1365 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1366 
1367 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
1368 		/* Free the hash entries in hash bucket i */
1369 		list_iter = hash_table[i]->listhead.next;
1370 		while (list_iter != &hash_table[i]->listhead) {
1371 			hash_entry =
1372 				(struct htt_rx_hash_entry *)((char *)list_iter -
1373 							     pdev->rx_ring.
1374 							     listnode_offset);
1375 			if (hash_entry->netbuf) {
1376 				if (ipa_smmu) {
1377 					if (qdf_unlikely(
1378 						!qdf_nbuf_is_rx_ipa_smmu_map(
1379 							hash_entry->netbuf))) {
1380 						qdf_err("nbuf: %pK NOT mapped",
1381 							hash_entry->netbuf);
1382 						qdf_assert_always(0);
1383 					}
1384 					qdf_nbuf_set_rx_ipa_smmu_map(
1385 							hash_entry->netbuf,
1386 							false);
1387 					qdf_update_mem_map_table(pdev->osdev,
1388 						&mem_map_table,
1389 						QDF_NBUF_CB_PADDR(
1390 							hash_entry->netbuf),
1391 						HTT_RX_BUF_SIZE);
1392 
1393 					qdf_assert_always(
1394 						!cds_smmu_map_unmap(
1395 							false, 1,
1396 							&mem_map_table));
1397 				}
1398 #ifdef DEBUG_DMA_DONE
1399 				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
1400 					       QDF_DMA_BIDIRECTIONAL);
1401 #else
1402 				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
1403 					       QDF_DMA_FROM_DEVICE);
1404 #endif
1405 				qdf_nbuf_free(hash_entry->netbuf);
1406 				hash_entry->paddr = 0;
1407 			}
1408 			list_iter = list_iter->next;
1409 
1410 			if (!hash_entry->fromlist)
1411 				qdf_mem_free(hash_entry);
1412 		}
1413 
1414 		qdf_mem_free(hash_table[i]);
1415 	}
1416 	qdf_mem_free(hash_table);
1417 
1418 	qdf_spinlock_destroy(&pdev->rx_ring.rx_hash_lock);
1419 }
1420 
1421 int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num)
1422 {
1423 	int filled = 0;
1424 
1425 	if (!qdf_spin_trylock_bh(&pdev->rx_ring.refill_lock)) {
1426 		if (qdf_atomic_read(&pdev->rx_ring.refill_debt)
1427 			 < RX_RING_REFILL_DEBT_MAX) {
1428 			qdf_atomic_add(num, &pdev->rx_ring.refill_debt);
1429 			pdev->rx_buff_debt_invoked++;
1430 			return filled; /* 0 */
1431 		}
1432 		/*
1433 		 * else:
1434 		 * If the debt has grown large, it is better to let the lock
1435 		 * holder finish its work, then acquire the lock ourselves and
1436 		 * fill our own part.
1437 		 */
1438 		qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
1439 	}
1440 	pdev->rx_buff_fill_n_invoked++;
1441 
1442 	filled = htt_rx_ring_fill_n(pdev, num);
1443 
1444 	if (filled > num) {
1445 		/* we served ourselves and some other debt */
1446 		/* sub is safer than  = 0 */
1447 		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
1448 	} else {
1449 		qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
1450 	}
1451 	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
1452 
1453 	return filled;
1454 }
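
/*
 * Illustrative debt accounting for the function above (hypothetical numbers):
 * if a caller asks for num == 16 buffers but htt_rx_ring_fill_n() only fills
 * 10, the remaining 6 are added to rx_ring.refill_debt and served later (for
 * example by the refill retry timer).  Conversely, if fill_n fills 20 because
 * it also covered outstanding debt, 4 are subtracted from the debt counter.
 */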
1455 
1456 #if defined(WLAN_FEATURE_TSF_PLUS) && !defined(CONFIG_HL_SUPPORT)
1457 /**
1458  * htt_rx_tail_msdu_timestamp() - update tail msdu tsf64 timestamp
1459  * @tail_rx_desc: pointer to tail msdu descriptor
1460  * @timestamp_rx_desc: pointer to timestamp msdu descriptor
1461  *
1462  * Return: none
1463  */
1464 static inline void htt_rx_tail_msdu_timestamp(
1465 			struct htt_host_rx_desc_base *tail_rx_desc,
1466 			struct htt_host_rx_desc_base *timestamp_rx_desc)
1467 {
1468 	if (tail_rx_desc) {
1469 		if (!timestamp_rx_desc) {
1470 			tail_rx_desc->ppdu_end.wb_timestamp_lower_32 = 0;
1471 			tail_rx_desc->ppdu_end.wb_timestamp_upper_32 = 0;
1472 		} else {
1473 			if (timestamp_rx_desc != tail_rx_desc) {
1474 				tail_rx_desc->ppdu_end.wb_timestamp_lower_32 =
1475 			timestamp_rx_desc->ppdu_end.wb_timestamp_lower_32;
1476 				tail_rx_desc->ppdu_end.wb_timestamp_upper_32 =
1477 			timestamp_rx_desc->ppdu_end.wb_timestamp_upper_32;
1478 			}
1479 		}
1480 	}
1481 }
1482 #else
1483 static inline void htt_rx_tail_msdu_timestamp(
1484 			struct htt_host_rx_desc_base *tail_rx_desc,
1485 			struct htt_host_rx_desc_base *timestamp_rx_desc)
1486 {
1487 }
1488 #endif
1489 
1490 static int
1491 htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
1492 				qdf_nbuf_t rx_ind_msg,
1493 				qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1494 				uint32_t *replenish_cnt)
1495 {
1496 	qdf_nbuf_t msdu, next, prev = NULL;
1497 	uint8_t *rx_ind_data;
1498 	uint32_t *msg_word;
1499 	uint32_t rx_ctx_id;
1500 	unsigned int msdu_count = 0;
1501 	uint8_t offload_ind, frag_ind;
1502 	uint8_t peer_id;
1503 	struct htt_host_rx_desc_base *rx_desc = NULL;
1504 	enum qdf_dp_tx_rx_status status = QDF_TX_RX_STATUS_OK;
1505 	qdf_dma_addr_t paddr;
1506 	qdf_mem_info_t mem_map_table = {0};
1507 	int ret = 1;
1508 	struct htt_host_rx_desc_base *timestamp_rx_desc = NULL;
1509 
1510 	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
1511 
1512 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
1513 	rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(rx_ind_msg);
1514 	msg_word = (uint32_t *)rx_ind_data;
1515 	peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
1516 					*(u_int32_t *)rx_ind_data);
1517 
1518 	offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
1519 	frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
1520 
1521 	/* Get the total number of MSDUs */
1522 	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
1523 	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
1524 
1525 	ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
1526 	htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
1527 
1528 	msg_word =
1529 		(uint32_t *)(rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
1530 	if (offload_ind) {
1531 		ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
1532 							msg_word);
1533 		*head_msdu = *tail_msdu = NULL;
1534 		ret = 0;
1535 		goto end;
1536 	}
1537 
1538 	paddr = htt_rx_in_ord_paddr_get(msg_word);
1539 	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
1540 
1541 	if (qdf_unlikely(!msdu)) {
1542 		qdf_print("netbuf pop failed!");
1543 		*tail_msdu = NULL;
1544 		pdev->rx_ring.pop_fail_cnt++;
1545 		ret = 0;
1546 		goto end;
1547 	}
1548 
1549 	while (msdu_count > 0) {
1550 		if (qdf_nbuf_is_rx_ipa_smmu_map(msdu)) {
1551 			/*
1552 			 * nbuf was already detached from hash_entry,
1553 			 * there is no parallel IPA context to access
1554 			 * this nbuf for smmu map/unmap, so updating
1555 			 * this flag here without lock.
1556 			 *
1557 			 * This flag was not updated in netbuf_pop context
1558 			 * htt_rx_hash_list_lookup (where lock held), to
1559 			 * differentiate whether this nbuf to be
1560 			 * smmu unmapped or it was never mapped so far.
1561 			 */
1562 			qdf_nbuf_set_rx_ipa_smmu_map(msdu, false);
1563 			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
1564 						 QDF_NBUF_CB_PADDR(msdu),
1565 						 HTT_RX_BUF_SIZE);
1566 			qdf_assert_always(
1567 				!cds_smmu_map_unmap(false, 1, &mem_map_table));
1568 		}
1569 
1570 		/*
1571 		 * Set the netbuf length to be the entire buffer length
1572 		 * initially, so the unmap will unmap the entire buffer.
1573 		 */
1574 		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
1575 #ifdef DEBUG_DMA_DONE
1576 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
1577 #else
1578 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
1579 #endif
1580 		msdu_count--;
1581 
1582 		if (pdev->rx_buff_pool.netbufs_ring &&
1583 		    QDF_NBUF_CB_RX_PACKET_BUFF_POOL(msdu) &&
1584 		    !htt_rx_buff_pool_refill(pdev, msdu)) {
1585 			if (!msdu_count) {
1586 				if (!prev) {
1587 					*head_msdu = *tail_msdu = NULL;
1588 					ret = 1;
1589 					goto end;
1590 				}
1591 				*tail_msdu = prev;
1592 				qdf_nbuf_set_next(prev, NULL);
1593 				goto end;
1594 			} else {
1595 				/* get the next msdu */
1596 				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1597 				paddr = htt_rx_in_ord_paddr_get(msg_word);
1598 				next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1599 				if (qdf_unlikely(!next)) {
1600 					qdf_print("netbuf pop failed!");
1601 					*tail_msdu = NULL;
1602 					pdev->rx_ring.pop_fail_cnt++;
1603 					ret = 0;
1604 					goto end;
1605 				}
1606 				/* if this is not the first msdu, update the
1607 				 * next pointer of the preceding msdu
1608 				 */
1609 				if (prev) {
1610 					qdf_nbuf_set_next(prev, next);
1611 				} else {
1612 					/* if this is the first msdu, update
1613 					 * head pointer
1614 					 */
1615 					*head_msdu = next;
1616 				}
1617 				msdu = next;
1618 				continue;
1619 			}
1620 		}
1621 
1622 		/* cache consistency has been taken care of by qdf_nbuf_unmap */
1623 		rx_desc = htt_rx_desc(msdu);
1624 		htt_rx_extract_lro_info(msdu, rx_desc);
1625 
1626 		/* check if the msdu is last mpdu */
1627 		if (rx_desc->attention.last_mpdu)
1628 			timestamp_rx_desc = rx_desc;
1629 
1630 		/*
1631 		 * Make the netbuf's data pointer point to the payload rather
1632 		 * than the descriptor.
1633 		 */
1634 		qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
1635 
1636 		QDF_NBUF_CB_DP_TRACE_PRINT(msdu) = false;
1637 		qdf_dp_trace_set_track(msdu, QDF_RX);
1638 		QDF_NBUF_CB_TX_PACKET_TRACK(msdu) = QDF_NBUF_TX_PKT_DATA_TRACK;
1639 		QDF_NBUF_CB_RX_CTX_ID(msdu) = rx_ctx_id;
1640 
1641 		if (qdf_nbuf_is_ipv4_arp_pkt(msdu))
1642 			QDF_NBUF_CB_GET_PACKET_TYPE(msdu) =
1643 				QDF_NBUF_CB_PACKET_TYPE_ARP;
1644 
1645 		DPTRACE(qdf_dp_trace(msdu,
1646 				     QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD,
1647 				     QDF_TRACE_DEFAULT_PDEV_ID,
1648 				     qdf_nbuf_data_addr(msdu),
1649 				     sizeof(qdf_nbuf_data(msdu)), QDF_RX));
1650 
1651 		qdf_nbuf_trim_tail(msdu,
1652 				   HTT_RX_BUF_SIZE -
1653 				   (RX_STD_DESC_SIZE +
1654 				    HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
1655 				    *(msg_word + NEXT_FIELD_OFFSET_IN32))));
1656 #if defined(HELIUMPLUS_DEBUG)
1657 		ol_txrx_dump_pkt(msdu, 0, 64);
1658 #endif
1659 		*((uint8_t *)&rx_desc->fw_desc.u.val) =
1660 			HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word +
1661 						NEXT_FIELD_OFFSET_IN32));
1662 
1663 		/* call the packet logging callback, if registered */
1664 		if (pdev->rx_pkt_dump_cb) {
1665 			if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
1666 					 !RX_DESC_DISCARD_IS_SET))
1667 				status = QDF_TX_RX_STATUS_FW_DISCARD;
1668 			pdev->rx_pkt_dump_cb(msdu, peer_id, status);
1669 		}
1670 
1671 		if (pdev->cfg.is_first_wakeup_packet)
1672 			htt_get_first_packet_after_wow_wakeup(
1673 				msg_word + NEXT_FIELD_OFFSET_IN32, msdu);
1674 
1675 		/* if the discard flag is set (SA is the self MAC),
1676 		 * skip the MIC failure check.
1677 		 */
1678 		if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
1679 				 !RX_DESC_DISCARD_IS_SET)) {
1680 			uint8_t tid =
1681 				HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
1682 					*(u_int32_t *)rx_ind_data);
1683 			ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
1684 						rx_desc, msdu);
1685 
1686 			htt_rx_desc_frame_free(pdev, msdu);
1687 			/* if this is the last msdu */
1688 			if (!msdu_count) {
1689 				/* if this is the only msdu */
1690 				if (!prev) {
1691 					*head_msdu = *tail_msdu = NULL;
1692 					ret = 0;
1693 					goto end;
1694 				}
1695 				*tail_msdu = prev;
1696 				qdf_nbuf_set_next(prev, NULL);
1697 				goto end;
1698 			} else { /* if this is not the last msdu */
1699 				/* get the next msdu */
1700 				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1701 				paddr = htt_rx_in_ord_paddr_get(msg_word);
1702 				next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1703 				if (qdf_unlikely(!next)) {
1704 					qdf_print("netbuf pop failed!");
1705 					*tail_msdu = NULL;
1706 					pdev->rx_ring.pop_fail_cnt++;
1707 					ret = 0;
1708 					goto end;
1709 				}
1710 
1711 				/* if this is not the first msdu, update the
1712 				 * next pointer of the preceding msdu
1713 				 */
1714 				if (prev) {
1715 					qdf_nbuf_set_next(prev, next);
1716 				} else {
1717 					/* if this is the first msdu, update the
1718 					 * head pointer
1719 					 */
1720 					*head_msdu = next;
1721 				}
1722 				msdu = next;
1723 				continue;
1724 			}
1725 		}
1726 		/* check whether more msdus remain in this indication */
1727 		if (msdu_count) {
1728 			msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1729 			paddr = htt_rx_in_ord_paddr_get(msg_word);
1730 			next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1731 			if (qdf_unlikely(!next)) {
1732 				qdf_print("netbuf pop failed!");
1733 				*tail_msdu = NULL;
1734 				pdev->rx_ring.pop_fail_cnt++;
1735 				ret = 0;
1736 				goto end;
1737 			}
1738 			qdf_nbuf_set_next(msdu, next);
1739 			prev = msdu;
1740 			msdu = next;
1741 		} else {
1742 			*tail_msdu = msdu;
1743 			qdf_nbuf_set_next(msdu, NULL);
1744 		}
1745 	}
1746 
1747 	htt_rx_tail_msdu_timestamp(rx_desc, timestamp_rx_desc);
1748 
1749 end:
1750 	return ret;
1751 }
1752 
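/**
 * htt_rx_in_ord_mpdu_desc_list_next_ll() - get the rx descriptor of a netbuf
 * @pdev: handle to the HTT pdev
 * @netbuf: rx netbuf whose descriptor is requested
 *
 * Used on the in-order (full reorder offload) rx path, where the HTT
 * rx descriptor sits at the start of the rx buffer, ahead of the
 * payload.
 *
 * Return: pointer to the HTT rx descriptor of @netbuf
 */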
1753 static void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
1754 						  qdf_nbuf_t netbuf)
1755 {
1756 	return (void *)htt_rx_desc(netbuf);
1757 }
1758 #else
1759 
1760 static inline
1761 int htt_rx_hash_init(struct htt_pdev_t *pdev)
1762 {
1763 	return 0;
1764 }
1765 
1766 static inline
1767 void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
1768 {
1769 }
1770 
1771 static inline int
1772 htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
1773 				qdf_nbuf_t rx_ind_msg,
1774 				qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1775 				uint32_t *replenish_cnt)
1776 {
1777 	return 0;
1778 }
1779 
1780 static inline
1781 void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
1782 					   qdf_nbuf_t netbuf)
1783 {
1784 	return NULL;
1785 }
1786 #endif
1787 
1788 #ifdef WLAN_PARTIAL_REORDER_OFFLOAD
1789 
1790 /* AR9888v1 WORKAROUND for EV#112367 */
1791 /* FIX THIS - remove this WAR when the bug is fixed */
1792 #define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR
1793 
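/**
 * htt_rx_amsdu_pop_ll() - pop the MSDUs of one rx indication from the ring
 * @pdev: handle to the HTT pdev
 * @rx_ind_msg: HTT_T2H RX_IND or RX_FRAG_IND message
 * @head_msdu: output - head of the popped MSDU chain
 * @tail_msdu: output - tail of the popped MSDU chain
 * @msdu_count: pointer to the MSDU count (not written on this path)
 *
 * Pops rx buffers from the rx ring, unmaps them, copies the per-MSDU
 * FW rx descriptor from the indication message, applies the HW rx
 * descriptor (length, checksum, chaining) and links the buffers into
 * a NULL-terminated chain from *head_msdu to *tail_msdu.
 *
 * Return: 1 if any MSDU spans multiple rx buffers (msdu chaining),
 *	   otherwise 0
 */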
1794 static int
1795 htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
1796 		    qdf_nbuf_t rx_ind_msg,
1797 		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1798 		    uint32_t *msdu_count)
1799 {
1800 	int msdu_len, msdu_chaining = 0;
1801 	qdf_nbuf_t msdu;
1802 	struct htt_host_rx_desc_base *rx_desc;
1803 	uint8_t *rx_ind_data;
1804 	uint32_t *msg_word, num_msdu_bytes;
1805 	qdf_dma_addr_t rx_desc_paddr;
1806 	enum htt_t2h_msg_type msg_type;
1807 	uint8_t pad_bytes = 0;
1808 
1809 	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
1810 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
1811 	msg_word = (uint32_t *)rx_ind_data;
1812 
1813 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
1814 
1815 	if (qdf_unlikely(msg_type == HTT_T2H_MSG_TYPE_RX_FRAG_IND)) {
1816 		num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
1817 			*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
1818 	} else {
1819 		num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
1820 			*(msg_word
1821 			  + HTT_RX_IND_HDR_PREFIX_SIZE32
1822 			  + HTT_RX_PPDU_DESC_SIZE32));
1823 	}
1824 	msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
1825 	while (1) {
1826 		int last_msdu, msdu_len_invalid, msdu_chained;
1827 		int byte_offset;
1828 		qdf_nbuf_t next;
1829 
1830 		/*
1831 		 * Set the netbuf length to be the entire buffer length
1832 		 * initially, so the unmap will unmap the entire buffer.
1833 		 */
1834 		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
1835 #ifdef DEBUG_DMA_DONE
1836 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
1837 #else
1838 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
1839 #endif
1840 
1841 		/* cache consistency has been taken care of by qdf_nbuf_unmap */
1842 
1843 		/*
1844 		 * Now read the rx descriptor.
1845 		 * Set the length to the appropriate value.
1846 		 * Check if this MSDU completes an MPDU.
1847 		 */
1848 		rx_desc = htt_rx_desc(msdu);
1849 #if defined(HELIUMPLUS)
1850 		if (HTT_WIFI_IP(pdev, 2, 0))
1851 			pad_bytes = rx_desc->msdu_end.l3_header_padding;
1852 #endif /* defined(HELIUMPLUS) */
1853 
1854 		/*
1855 		 * Save PADDR of descriptor and make the netbuf's data pointer
1856 		 * point to the payload rather than the descriptor.
1857 		 */
1858 		rx_desc_paddr = QDF_NBUF_CB_PADDR(msdu);
1859 		qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION +
1860 					 pad_bytes);
1861 
1862 		/*
1863 		 * Sanity check - confirm the HW is finished filling in
1864 		 * the rx data.
1865 		 * If the HW and SW are working correctly, then it's guaranteed
1866 		 * that the HW's MAC DMA is done before this point in the SW.
1867 		 * To prevent the case that we handle a stale Rx descriptor,
1868 		 * just assert for now until we have a way to recover.
1869 		 */
1870 
1871 #ifdef DEBUG_DMA_DONE
1872 		if (qdf_unlikely(!((*(uint32_t *)&rx_desc->attention)
1873 				   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
1874 			int dbg_iter = MAX_DONE_BIT_CHECK_ITER;
1875 
1876 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
1877 				  "malformed frame");
1878 
1879 			while (dbg_iter &&
1880 			       (!((*(uint32_t *)&rx_desc->attention) &
1881 				  RX_ATTENTION_0_MSDU_DONE_MASK))) {
1882 				qdf_mdelay(1);
1883 				qdf_mem_dma_sync_single_for_cpu(
1884 					pdev->osdev,
1885 					rx_desc_paddr,
1886 					HTT_RX_STD_DESC_RESERVATION,
1887 					DMA_FROM_DEVICE);
1888 
1889 				QDF_TRACE(QDF_MODULE_ID_HTT,
1890 					  QDF_TRACE_LEVEL_INFO,
1891 					  "debug iter %d success %d", dbg_iter,
1892 					  pdev->rx_ring.dbg_sync_success);
1893 
1894 				dbg_iter--;
1895 			}
1896 
1897 			if (qdf_unlikely(!((*(uint32_t *)&rx_desc->attention)
1898 					   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
1899 #ifdef HTT_RX_RESTORE
1900 				QDF_TRACE(QDF_MODULE_ID_HTT,
1901 					  QDF_TRACE_LEVEL_ERROR,
1902 					  "RX done bit error detected!");
1903 
1904 				qdf_nbuf_set_next(msdu, NULL);
1905 				*tail_msdu = msdu;
1906 				pdev->rx_ring.rx_reset = 1;
1907 				return msdu_chaining;
1908 #else
1909 				wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
1910 						    0, GEN_CMD);
1911 				HTT_ASSERT_ALWAYS(0);
1912 #endif
1913 			}
1914 			pdev->rx_ring.dbg_sync_success++;
1915 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
1916 				  "debug iter %d success %d", dbg_iter,
1917 				  pdev->rx_ring.dbg_sync_success);
1918 		}
1919 #else
1920 		HTT_ASSERT_ALWAYS((*(uint32_t *)&rx_desc->attention) &
1921 				  RX_ATTENTION_0_MSDU_DONE_MASK);
1922 #endif
1923 		/*
1924 		 * Copy the FW rx descriptor for this MSDU from the rx
1925 		 * indication message into the MSDU's netbuf.
1926 		 * HL uses the same rx indication message definition as LL, and
1927 		 * simply appends new info (fields from the HW rx desc, and the
1928 		 * MSDU payload itself).
1929 		 * So, the offset into the rx indication message only has to
1930 		 * account for the standard offset of the per-MSDU FW rx
1931 		 * desc info within the message, and how many bytes of the
1932 		 * per-MSDU FW rx desc info have already been consumed.
1933 		 * (And the endianness of the host,
1934 		 * since for a big-endian host, the rx ind message contents,
1935 		 * including the per-MSDU rx desc bytes, were byteswapped during
1936 		 * upload.)
1937 		 */
1938 		if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
1939 			if (qdf_unlikely
1940 				    (msg_type == HTT_T2H_MSG_TYPE_RX_FRAG_IND))
1941 				byte_offset =
1942 					HTT_ENDIAN_BYTE_IDX_SWAP
1943 					(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
1944 			else
1945 				byte_offset =
1946 					HTT_ENDIAN_BYTE_IDX_SWAP
1947 					(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
1948 						pdev->rx_ind_msdu_byte_idx);
1949 
1950 			*((uint8_t *)&rx_desc->fw_desc.u.val) =
1951 				rx_ind_data[byte_offset];
1952 			/*
1953 			 * The target is expected to only provide the basic
1954 			 * per-MSDU rx descriptors.  Just to be sure,
1955 			 * verify that the target has not attached
1956 			 * extension data (e.g. LRO flow ID).
1957 			 */
1958 			/*
1959 			 * The assertion below currently doesn't work for
1960 			 * RX_FRAG_IND messages, since their format differs
1961 			 * from the RX_IND format (no FW rx PPDU desc in
1962 			 * the current RX_FRAG_IND message).
1963 			 * If the RX_FRAG_IND message format is updated to match
1964 			 * the RX_IND message format, then the following
1965 			 * assertion can be restored.
1966 			 */
1967 			/*
1968 			 * qdf_assert((rx_ind_data[byte_offset] &
1969 			 * FW_RX_DESC_EXT_M) == 0);
1970 			 */
1971 			pdev->rx_ind_msdu_byte_idx += 1;
1972 			/* or more, if there's ext data */
1973 		} else {
1974 			/*
1975 			 * When an oversized A-MSDU is received, the FW loses
1976 			 * some of the MSDU status - the FW descriptors
1977 			 * provided are then fewer than the actual MSDUs
1978 			 * inside this MPDU.
1979 			 * Mark the FW descriptors so the MSDUs are still
1980 			 * delivered to the upper stack if the MPDU has no CRC error.
1981 			 *
1982 			 * FIX THIS - the FW descriptors actually describe the
1983 			 * MSDUs at the end of this A-MSDU, not the beginning.
1984 			 */
1985 			*((uint8_t *)&rx_desc->fw_desc.u.val) = 0;
1986 		}
1987 
1988 		/*
1989 		 *  TCP/UDP checksum offload support
1990 		 */
1991 		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
1992 
1993 		msdu_len_invalid = (*(uint32_t *)&rx_desc->attention) &
1994 				   RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
1995 		msdu_chained = (((*(uint32_t *)&rx_desc->frag_info) &
1996 				 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
1997 				RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
1998 		msdu_len =
1999 			((*((uint32_t *)&rx_desc->msdu_start)) &
2000 			 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
2001 			RX_MSDU_START_0_MSDU_LENGTH_LSB;
2002 
2003 		do {
2004 			if (!msdu_len_invalid && !msdu_chained) {
2005 #if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
2006 				if (msdu_len > 0x3000)
2007 					break;
2008 #endif
2009 				qdf_nbuf_trim_tail(msdu,
2010 						   HTT_RX_BUF_SIZE -
2011 						   (RX_STD_DESC_SIZE +
2012 						    msdu_len));
2013 			}
2014 		} while (0);
2015 
2016 		while (msdu_chained--) {
2017 			next = htt_rx_netbuf_pop(pdev);
2018 			qdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
2019 			msdu_len -= HTT_RX_BUF_SIZE;
2020 			qdf_nbuf_set_next(msdu, next);
2021 			msdu = next;
2022 			msdu_chaining = 1;
2023 
2024 			if (msdu_chained == 0) {
2025 				/* Trim the last one to the correct size -
2026 				 * accounting for inconsistent HW lengths
2027 				 * causing length overflows and underflows
2028 				 */
2029 				if (((unsigned int)msdu_len) >
2030 				    ((unsigned int)
2031 				     (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
2032 					msdu_len =
2033 						(HTT_RX_BUF_SIZE -
2034 						 RX_STD_DESC_SIZE);
2035 				}
2036 
2037 				qdf_nbuf_trim_tail(next,
2038 						   HTT_RX_BUF_SIZE -
2039 						   (RX_STD_DESC_SIZE +
2040 						    msdu_len));
2041 			}
2042 		}
2043 
2044 		last_msdu =
2045 			((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
2046 			 RX_MSDU_END_4_LAST_MSDU_MASK) >>
2047 			RX_MSDU_END_4_LAST_MSDU_LSB;
2048 
2049 		if (last_msdu) {
2050 			qdf_nbuf_set_next(msdu, NULL);
2051 			break;
2052 		}
2053 
2054 		next = htt_rx_netbuf_pop(pdev);
2055 		qdf_nbuf_set_next(msdu, next);
2056 		msdu = next;
2057 	}
2058 	*tail_msdu = msdu;
2059 
2060 	/*
2061 	 * Don't refill the ring yet.
2062 	 * First, the elements popped here are still in use - it is
2063 	 * not safe to overwrite them until the matching call to
2064 	 * mpdu_desc_list_next.
2065 	 * Second, for efficiency it is preferable to refill the rx ring
2066 	 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
2067 	 * rather than one MPDU's worth of rx buffers (something like 3 buffers).
2068 	 * Consequently, we'll rely on the txrx SW to tell us when it is done
2069 	 * pulling all the PPDU's rx buffers out of the rx ring, and then
2070 	 * refill it just once.
2071 	 */
2072 	return msdu_chaining;
2073 }
2074 
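/**
 * htt_rx_mpdu_desc_list_next_ll() - get the next MPDU's rx descriptor
 * @pdev: handle to the HTT pdev
 * @rx_ind_msg: rx indication message (unused on this path)
 *
 * Returns the rx descriptor of the netbuf at the current MSDU-desc
 * software read index and advances that index to the MSDU-payload
 * read index.
 *
 * Return: pointer to the HTT rx descriptor
 */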
2075 static
2076 void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
2077 {
2078 	int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
2079 	qdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
2080 
2081 	pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
2082 	return (void *)htt_rx_desc(netbuf);
2083 }
2084 
2085 #else
2086 
2087 static inline int
2088 htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
2089 		    qdf_nbuf_t rx_ind_msg,
2090 		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
2091 		    uint32_t *msdu_count)
2092 {
2093 	return 0;
2094 }
2095 
2096 static inline
2097 void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
2098 {
2099 	return NULL;
2100 }
2101 #endif
2102 
2103 /**
2104  * htt_rx_fill_ring_count() - replenish rx msdu buffer
2105  * @pdev: Handle (pointer) to HTT pdev.
2106  *
2107  * This function replenishes the rx ring with buffers up to its
2108  * configured fill level.
2109  *
2110  * Return: None
2111  */
2112 void htt_rx_fill_ring_count(htt_pdev_handle pdev)
2113 {
2114 	int num_to_fill;
2115 
2116 	num_to_fill = pdev->rx_ring.fill_level -
2117 		qdf_atomic_read(&pdev->rx_ring.fill_cnt);
2118 	htt_rx_ring_fill_n(pdev, num_to_fill /* okay if <= 0 */);
2119 }
2120 
2121 int htt_rx_attach(struct htt_pdev_t *pdev)
2122 {
2123 	qdf_dma_addr_t paddr;
2124 	uint32_t ring_elem_size = sizeof(target_paddr_t);
2125 
2126 	pdev->rx_ring.size = htt_rx_ring_size(pdev);
2127 	HTT_ASSERT2(QDF_IS_PWR2(pdev->rx_ring.size));
2128 	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
2129 
2130 	/*
2131 	 * Set the initial value for the level to which the rx ring
2132 	 * should be filled, based on the max throughput and the worst
2133 	 * likely latency for the host to fill the rx ring.
2134 	 * In theory, this fill level can be dynamically adjusted from
2135 	 * the initial value set here to reflect the actual host latency
2136 	 * rather than a conservative assumption.
2137 	 */
2138 	pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
2139 
2140 	if (pdev->cfg.is_full_reorder_offload) {
2141 		if (htt_rx_hash_init(pdev))
2142 			goto fail1;
2143 
2144 		/* allocate the target index */
2145 		pdev->rx_ring.target_idx.vaddr =
2146 			 qdf_mem_alloc_consistent(pdev->osdev, pdev->osdev->dev,
2147 						  sizeof(uint32_t), &paddr);
2148 
2149 		if (!pdev->rx_ring.target_idx.vaddr)
2150 			goto fail2;
2151 
2152 		pdev->rx_ring.target_idx.paddr = paddr;
2153 		*pdev->rx_ring.target_idx.vaddr = 0;
2154 	} else {
2155 		pdev->rx_ring.buf.netbufs_ring =
2156 			qdf_mem_malloc(pdev->rx_ring.size * sizeof(qdf_nbuf_t));
2157 		if (!pdev->rx_ring.buf.netbufs_ring)
2158 			goto fail1;
2159 
2160 		pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
2161 		pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
2162 	}
2163 
2164 	pdev->rx_ring.buf.paddrs_ring =
2165 		qdf_mem_alloc_consistent(
2166 			pdev->osdev, pdev->osdev->dev,
2167 			 pdev->rx_ring.size * ring_elem_size,
2168 			 &paddr);
2169 	if (!pdev->rx_ring.buf.paddrs_ring)
2170 		goto fail3;
2171 
2172 	pdev->rx_ring.base_paddr = paddr;
2173 	pdev->rx_ring.alloc_idx.vaddr =
2174 		 qdf_mem_alloc_consistent(
2175 			pdev->osdev, pdev->osdev->dev,
2176 			 sizeof(uint32_t), &paddr);
2177 
2178 	if (!pdev->rx_ring.alloc_idx.vaddr)
2179 		goto fail4;
2180 
2181 	pdev->rx_ring.alloc_idx.paddr = paddr;
2182 	*pdev->rx_ring.alloc_idx.vaddr = 0;
2183 
2184 	if (htt_rx_buff_pool_init(pdev))
2185 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
2186 			  "HTT: pre allocated packet pool alloc failed");
2187 
2188 	/*
2189 	 * Initialize the Rx refill reference counter to be one so that
2190 	 * only one thread is allowed to refill the Rx ring.
2191 	 */
2192 	qdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
2193 	qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
2194 
2195 	/* Initialize the refill_lock and debt (for rx-parallelization) */
2196 	qdf_spinlock_create(&pdev->rx_ring.refill_lock);
2197 	qdf_atomic_init(&pdev->rx_ring.refill_debt);
2198 
2199 	/* Initialize the Rx refill retry timer */
2200 	qdf_timer_init(pdev->osdev,
2201 		       &pdev->rx_ring.refill_retry_timer,
2202 		       htt_rx_ring_refill_retry, (void *)pdev,
2203 		       QDF_TIMER_TYPE_SW);
2204 
2205 	qdf_atomic_init(&pdev->rx_ring.fill_cnt);
2206 	pdev->rx_ring.pop_fail_cnt = 0;
2207 #ifdef DEBUG_DMA_DONE
2208 	pdev->rx_ring.dbg_ring_idx = 0;
2209 	pdev->rx_ring.dbg_refill_cnt = 0;
2210 	pdev->rx_ring.dbg_sync_success = 0;
2211 #endif
2212 #ifdef HTT_RX_RESTORE
2213 	pdev->rx_ring.rx_reset = 0;
2214 	pdev->rx_ring.htt_rx_restore = 0;
2215 #endif
2216 	htt_rx_dbg_rxbuf_init(pdev);
2217 	htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
2218 
2219 	if (pdev->cfg.is_full_reorder_offload) {
2220 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
2221 			  "HTT: full reorder offload enabled");
2222 		htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2223 		htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2224 		htt_rx_mpdu_desc_list_next =
2225 			 htt_rx_in_ord_mpdu_desc_list_next_ll;
2226 	} else {
2227 		htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
2228 		htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
2229 		htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
2230 	}
2231 
2232 	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
2233 		htt_rx_amsdu_pop = htt_rx_mon_amsdu_rx_in_order_pop_ll;
2234 
2235 	htt_rx_offload_msdu_cnt = htt_rx_offload_msdu_cnt_ll;
2236 	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
2237 	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
2238 	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
2239 	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
2240 	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
2241 	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
2242 	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
2243 	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
2244 	htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
2245 	htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
2246 	htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
2247 	htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
2248 	htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
2249 	htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
2250 	htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
2251 
2252 	return 0;               /* success */
2253 
2254 fail4:
2255 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2256 				pdev->rx_ring.size * sizeof(target_paddr_t),
2257 				pdev->rx_ring.buf.paddrs_ring,
2258 				pdev->rx_ring.base_paddr,
2259 				qdf_get_dma_mem_context((&pdev->rx_ring.buf),
2260 							memctx));
2261 
2262 fail3:
2263 	if (pdev->cfg.is_full_reorder_offload)
2264 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2265 					sizeof(uint32_t),
2266 					pdev->rx_ring.target_idx.vaddr,
2267 					pdev->rx_ring.target_idx.paddr,
2268 					qdf_get_dma_mem_context((&pdev->
2269 								 rx_ring.
2270 								 target_idx),
2271 								 memctx));
2272 	else
2273 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
2274 
2275 fail2:
2276 	if (pdev->cfg.is_full_reorder_offload)
2277 		htt_rx_hash_deinit(pdev);
2278 
2279 fail1:
2280 	return 1;               /* failure */
2281 }
2282 
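/**
 * htt_rx_detach() - tear down the HTT rx ring
 * @pdev: handle to the HTT pdev
 *
 * Stops and frees the refill retry timer, unmaps (including any IPA
 * SMMU mappings) and frees the rx buffers still held by the host,
 * and releases the ring, index words and hash table allocated by
 * htt_rx_attach().
 *
 * Return: None
 */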
2283 void htt_rx_detach(struct htt_pdev_t *pdev)
2284 {
2285 	bool ipa_smmu = false;
2286 	qdf_nbuf_t nbuf;
2287 
2288 	qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
2289 	qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
2290 	htt_rx_dbg_rxbuf_deinit(pdev);
2291 
2292 	ipa_smmu = htt_rx_ring_smmu_mapped(pdev);
2293 
2294 	if (pdev->cfg.is_full_reorder_offload) {
2295 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2296 					sizeof(uint32_t),
2297 					pdev->rx_ring.target_idx.vaddr,
2298 					pdev->rx_ring.target_idx.paddr,
2299 					qdf_get_dma_mem_context((&pdev->
2300 								 rx_ring.
2301 								 target_idx),
2302 								 memctx));
2303 		htt_rx_hash_deinit(pdev);
2304 	} else {
2305 		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
2306 		qdf_mem_info_t mem_map_table = {0};
2307 
2308 		while (sw_rd_idx != *pdev->rx_ring.alloc_idx.vaddr) {
2309 			nbuf = pdev->rx_ring.buf.netbufs_ring[sw_rd_idx];
2310 			if (ipa_smmu) {
2311 				if (qdf_unlikely(
2312 					!qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
2313 					qdf_err("smmu not mapped, nbuf: %pK",
2314 						nbuf);
2315 					qdf_assert_always(0);
2316 				}
2317 				qdf_nbuf_set_rx_ipa_smmu_map(nbuf, false);
2318 				qdf_update_mem_map_table(pdev->osdev,
2319 					&mem_map_table,
2320 					QDF_NBUF_CB_PADDR(nbuf),
2321 					HTT_RX_BUF_SIZE);
2322 				qdf_assert_always(
2323 					!cds_smmu_map_unmap(false, 1,
2324 							    &mem_map_table));
2325 			}
2326 #ifdef DEBUG_DMA_DONE
2327 			qdf_nbuf_unmap(pdev->osdev, nbuf,
2328 				       QDF_DMA_BIDIRECTIONAL);
2329 #else
2330 			qdf_nbuf_unmap(pdev->osdev, nbuf,
2331 				       QDF_DMA_FROM_DEVICE);
2332 #endif
2333 			qdf_nbuf_free(nbuf);
2334 			sw_rd_idx++;
2335 			sw_rd_idx &= pdev->rx_ring.size_mask;
2336 		}
2337 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
2338 	}
2339 
2340 	htt_rx_buff_pool_deinit(pdev);
2341 
2342 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2343 				sizeof(uint32_t),
2344 				pdev->rx_ring.alloc_idx.vaddr,
2345 				pdev->rx_ring.alloc_idx.paddr,
2346 				qdf_get_dma_mem_context((&pdev->rx_ring.
2347 							 alloc_idx),
2348 							 memctx));
2349 
2350 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2351 				pdev->rx_ring.size * sizeof(target_paddr_t),
2352 				pdev->rx_ring.buf.paddrs_ring,
2353 				pdev->rx_ring.base_paddr,
2354 				qdf_get_dma_mem_context((&pdev->rx_ring.buf),
2355 							memctx));
2356 
2357 	/* destroy the rx-parallelization refill spinlock */
2358 	qdf_spinlock_destroy(&pdev->rx_ring.refill_lock);
2359 }
2360 
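/**
 * htt_rx_hash_smmu_map() - SMMU map/unmap all rx nbufs in the hash table
 * @map: true to create the SMMU mappings, false to remove them
 * @pdev: handle to the HTT pdev
 *
 * Walks every rx hash bucket under the rx hash lock and, for each
 * netbuf whose SMMU state differs from @map, updates the IPA SMMU
 * mapping and the nbuf's smmu-map flag.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if a map/unmap
 *	   operation fails
 */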
2361 static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
2362 {
2363 	uint32_t i;
2364 	struct htt_rx_hash_entry *hash_entry;
2365 	struct htt_rx_hash_bucket **hash_table;
2366 	struct htt_list_node *list_iter = NULL;
2367 	qdf_mem_info_t mem_map_table = {0};
2368 	qdf_nbuf_t nbuf;
2369 	int ret;
2370 
2371 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
2372 	hash_table = pdev->rx_ring.hash_table;
2373 
2374 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
2375 		/* Walk the hash entries in hash bucket i */
2376 		list_iter = hash_table[i]->listhead.next;
2377 		while (list_iter != &hash_table[i]->listhead) {
2378 			hash_entry =
2379 				(struct htt_rx_hash_entry *)((char *)list_iter -
2380 							     pdev->rx_ring.
2381 							     listnode_offset);
2382 			nbuf = hash_entry->netbuf;
2383 			if (nbuf) {
2384 				if (qdf_unlikely(map ==
2385 					qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
2386 					qdf_err("map/unmap err:%d, nbuf:%pK",
2387 						map, nbuf);
2388 					list_iter = list_iter->next;
2389 					continue;
2390 				}
2391 				qdf_nbuf_set_rx_ipa_smmu_map(nbuf, map);
2392 				qdf_update_mem_map_table(pdev->osdev,
2393 						&mem_map_table,
2394 						QDF_NBUF_CB_PADDR(nbuf),
2395 						HTT_RX_BUF_SIZE);
2396 				ret = cds_smmu_map_unmap(map, 1,
2397 							 &mem_map_table);
2398 				if (ret) {
2399 					qdf_nbuf_set_rx_ipa_smmu_map(nbuf,
2400 								     !map);
2401 					qdf_err("map: %d failure, nbuf: %pK",
2402 						map, nbuf);
2403 					qdf_spin_unlock_bh(
2404 						&pdev->rx_ring.rx_hash_lock);
2405 					return QDF_STATUS_E_FAILURE;
2406 				}
2407 			}
2408 			list_iter = list_iter->next;
2409 		}
2410 	}
2411 
2412 	pdev->rx_ring.smmu_map = map;
2413 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
2414 
2415 	return QDF_STATUS_SUCCESS;
2416 }
2417 
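/**
 * htt_rx_update_smmu_map() - update the SMMU mapping of rx ring buffers
 * @pdev: handle to the HTT pdev
 * @map: true to SMMU map, false to unmap
 *
 * Does nothing unless the rx hash table exists, SMMU S1 translation is
 * enabled and IPA uC offload is enabled. Holds the refill lock so the
 * ring is not replenished while the mappings are changed.
 *
 * Return: QDF_STATUS_SUCCESS, or the error status returned by
 *	   htt_rx_hash_smmu_map()
 */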
2418 QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map)
2419 {
2420 	QDF_STATUS status;
2421 
2422 	if (!pdev->rx_ring.hash_table)
2423 		return QDF_STATUS_SUCCESS;
2424 
2425 	if (!qdf_mem_smmu_s1_enabled(pdev->osdev) || !pdev->is_ipa_uc_enabled)
2426 		return QDF_STATUS_SUCCESS;
2427 
2428 	qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
2429 	status = htt_rx_hash_smmu_map(map, pdev);
2430 	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
2431 
2432 	return status;
2433 }
2434