/*
 * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_mem.h>            /* qdf_mem_malloc, free, etc. */
#include <qdf_types.h>          /* qdf_print, bool */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_timer.h>          /* qdf_timer_free */

#include <htt.h>                /* HTT_HL_RX_DESC_SIZE */
#include <ol_cfg.h>
#include <ol_rx.h>
#include <ol_htt_rx_api.h>
#include <htt_internal.h>       /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
#include "regtable.h"

#include <cds_ieee80211_common.h>   /* ieee80211_frame, ieee80211_qoscntl */
#include <cds_utils.h>
#include <wlan_policy_mgr_api.h>
#include "ol_txrx_types.h"
#ifdef DEBUG_DMA_DONE
#include <asm/barrier.h>
#include <wma_api.h>
#endif
#include <pktlog_ac_fmt.h>
#include <wlan_mlme_api.h>

#ifdef DEBUG_DMA_DONE
#define MAX_DONE_BIT_CHECK_ITER 5
#endif

#ifdef HTT_DEBUG_DATA
#define HTT_PKT_DUMP(x) x
#else
#define HTT_PKT_DUMP(x) /* no-op */
#endif

/*--- setup / tear-down functions -------------------------------------------*/

#ifndef HTT_RX_HOST_LATENCY_MAX_MS
#define HTT_RX_HOST_LATENCY_MAX_MS 20 /* ms */	/* very conservative */
#endif

/* very conservative to ensure enough buffers are allocated */
#ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
#ifdef QCA_WIFI_3_0
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 20
#else
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
#endif
#endif

#ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
#define HTT_RX_RING_REFILL_RETRY_TIME_MS    50
#endif

#define RX_PADDR_MAGIC_PATTERN 0xDEAD0000

#ifdef ENABLE_DEBUG_ADDRESS_MARKING
static qdf_dma_addr_t
htt_rx_paddr_mark_high_bits(qdf_dma_addr_t paddr)
{
	if (sizeof(qdf_dma_addr_t) > 4) {
		/* clear high bits, leave lower 37 bits (paddr) */
		paddr &= 0x01FFFFFFFFF;
		/* mark upper 16 bits of paddr */
		paddr |= (((uint64_t)RX_PADDR_MAGIC_PATTERN) << 32);
	}
	return paddr;
}
#else
static qdf_dma_addr_t
htt_rx_paddr_mark_high_bits(qdf_dma_addr_t paddr)
{
	return paddr;
}
#endif
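
/*
 * Illustrative sketch of the debug marking above (hypothetical value):
 * with a 64-bit qdf_dma_addr_t, a DMA address of 0x1234567890 keeps its
 * lower 37 bits and receives the magic pattern in its upper 32 bits:
 *     htt_rx_paddr_mark_high_bits(0x1234567890) == 0xDEAD001234567890
 * A matching un-marking step (not part of this section) is expected to
 * strip the pattern again before the address is used for lookups.
 */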

/**
 * htt_get_first_packet_after_wow_wakeup() - get first packet after wow wakeup
 * @msg_word: pointer to rx indication message word
 * @buf: pointer to buffer
 *
 * Return: None
 */
static void
htt_get_first_packet_after_wow_wakeup(uint32_t *msg_word, qdf_nbuf_t buf)
{
	if (HTT_RX_IN_ORD_PADDR_IND_MSDU_INFO_GET(*msg_word) &
			FW_MSDU_INFO_FIRST_WAKEUP_M) {
		qdf_nbuf_mark_wakeup_frame(buf);
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
			  "%s: First packet after WOW Wakeup rcvd", __func__);
	}
}

/**
 * htt_rx_ring_smmu_mapped() - check if rx ring is smmu mapped or not
 * @pdev: HTT pdev handle
 *
 * Return: true or false.
 */
static inline bool htt_rx_ring_smmu_mapped(htt_pdev_handle pdev)
{
	if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
	    pdev->is_ipa_uc_enabled &&
	    pdev->rx_ring.smmu_map)
		return true;
	else
		return false;
}

static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
{
	int idx;
	qdf_nbuf_t msdu;

	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);

#ifdef DEBUG_DMA_DONE
	pdev->rx_ring.dbg_ring_idx++;
	pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
#endif

	idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
	msdu = pdev->rx_ring.buf.netbufs_ring[idx];
	idx++;
	idx &= pdev->rx_ring.size_mask;
	pdev->rx_ring.sw_rd_idx.msdu_payld = idx;
	qdf_atomic_dec(&pdev->rx_ring.fill_cnt);
	return msdu;
}

static inline unsigned int htt_rx_ring_elems(struct htt_pdev_t *pdev)
{
	return
		(*pdev->rx_ring.alloc_idx.vaddr -
		 pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
}
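
/*
 * Illustrative occupancy arithmetic (hypothetical values): the ring size is
 * a power of two, so size_mask = size - 1 and the subtraction above wraps
 * correctly.  For a 512-entry ring with alloc index 5 and software read
 * index 500:
 *     (5 - 500) & 0x1FF == 17
 * i.e. 17 filled elements remain between the read index and the alloc index.
 */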

/**
 * htt_rx_buff_pool_init() - initialize the pool of buffers
 * @pdev: pointer to device
 *
 * Return: 0 - success, 1 - failure
 */
static int htt_rx_buff_pool_init(struct htt_pdev_t *pdev)
{
	qdf_nbuf_t net_buf;
	int i;

	pdev->rx_buff_pool.netbufs_ring =
		qdf_mem_malloc(HTT_RX_PRE_ALLOC_POOL_SIZE * sizeof(qdf_nbuf_t));

	if (!pdev->rx_buff_pool.netbufs_ring)
		return 1; /* failure */

	qdf_atomic_init(&pdev->rx_buff_pool.fill_cnt);
	qdf_atomic_init(&pdev->rx_buff_pool.refill_low_mem);

	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
		net_buf = qdf_nbuf_alloc(pdev->osdev,
					 HTT_RX_BUF_SIZE,
					 0, 4, false);
		if (net_buf) {
			qdf_atomic_inc(&pdev->rx_buff_pool.fill_cnt);
			/*
			 * Mark this netbuf to differentiate it from
			 * other buffers: when set to 1, the buffer
			 * came from the pre-allocated pool.
			 */
			QDF_NBUF_CB_RX_PACKET_BUFF_POOL(net_buf) = 1;
		}
		/* Allow NULL to be inserted; allocation from this
		 * pool handles NULL slots.
		 */
		pdev->rx_buff_pool.netbufs_ring[i] = net_buf;
	}
	QDF_TRACE(QDF_MODULE_ID_HTT,
		  QDF_TRACE_LEVEL_INFO,
		  "max pool size %d pool filled %d",
		  HTT_RX_PRE_ALLOC_POOL_SIZE,
		  qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt));

	qdf_spinlock_create(&pdev->rx_buff_pool.rx_buff_pool_lock);
	return 0;
}

/**
 * htt_rx_buff_pool_deinit() - deinitialize the pool of buffers
 * @pdev: pointer to device
 *
 * Return: none
 */
static void htt_rx_buff_pool_deinit(struct htt_pdev_t *pdev)
{
	qdf_nbuf_t net_buf;
	int i;

	if (!pdev->rx_buff_pool.netbufs_ring)
		return;

	qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
		net_buf = pdev->rx_buff_pool.netbufs_ring[i];
		if (!net_buf)
			continue;
		qdf_nbuf_free(net_buf);
		qdf_atomic_dec(&pdev->rx_buff_pool.fill_cnt);
	}
	qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
	QDF_TRACE(QDF_MODULE_ID_HTT,
		  QDF_TRACE_LEVEL_INFO,
		  "max pool size %d pool filled %d",
		  HTT_RX_PRE_ALLOC_POOL_SIZE,
		  qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt));

	qdf_mem_free(pdev->rx_buff_pool.netbufs_ring);
	qdf_spinlock_destroy(&pdev->rx_buff_pool.rx_buff_pool_lock);
}

/**
 * htt_rx_buff_pool_refill() - refill the pool with a new buf or reuse same buf
 * @pdev: pointer to device
 * @netbuf: netbuf to reuse
 *
 * Return: true - a new buf could be allocated and inserted into the pool,
 * false - the netbuf had to be reused or could not be inserted into the pool
 */
static bool htt_rx_buff_pool_refill(struct htt_pdev_t *pdev, qdf_nbuf_t netbuf)
{
	bool ret = false;
	qdf_nbuf_t net_buf;
	int i;

	net_buf = qdf_nbuf_alloc(pdev->osdev,
				 HTT_RX_BUF_SIZE,
				 0, 4, false);
	if (net_buf) {
		/* Able to alloc a new net_buf;
		 * mark it as a pool buf.
		 */
		QDF_NBUF_CB_RX_PACKET_BUFF_POOL(net_buf) = 1;
		ret = true;
	} else {
		/* Reuse the netbuf and
		 * reset all of its fields.
		 */
		net_buf = netbuf;
		qdf_nbuf_reset(net_buf, 0, 4);

		/* mark this netbuf as pool buf */
		QDF_NBUF_CB_RX_PACKET_BUFF_POOL(net_buf) = 1;
	}

	qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
		/* insert the netbuf into an empty slot of the pool */
		if (pdev->rx_buff_pool.netbufs_ring[i])
			continue;

		pdev->rx_buff_pool.netbufs_ring[i] = net_buf;
		qdf_atomic_inc(&pdev->rx_buff_pool.fill_cnt);
		break;
	}
	qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);

	if (i == HTT_RX_PRE_ALLOC_POOL_SIZE) {
		/* failed to insert into the pool; free net_buf */
		qdf_nbuf_free(net_buf);
		ret = false;
	}

	return ret;
}

/**
 * htt_rx_buff_alloc() - allocate a net buf from the pool
 * @pdev: pointer to device
 *
 * Return: nbuf or NULL
 */
static qdf_nbuf_t htt_rx_buff_alloc(struct htt_pdev_t *pdev)
{
	qdf_nbuf_t net_buf = NULL;
	int i;

	if (!pdev->rx_buff_pool.netbufs_ring)
		return net_buf;

	qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
	for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
		/* take the first valid netbuf from the pool */
		if (!pdev->rx_buff_pool.netbufs_ring[i])
			continue;

		net_buf = pdev->rx_buff_pool.netbufs_ring[i];
		qdf_atomic_dec(&pdev->rx_buff_pool.fill_cnt);
		pdev->rx_buff_pool.netbufs_ring[i] = NULL;
		break;
	}
	qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
	return net_buf;
}

/**
 * htt_rx_ring_buf_attach() - return a net buf to attach to the ring
 * @pdev: pointer to device
 *
 * Return: nbuf or NULL
 */
static qdf_nbuf_t htt_rx_ring_buf_attach(struct htt_pdev_t *pdev)
{
	qdf_nbuf_t net_buf = NULL;
	bool allocated = true;

	net_buf =
		qdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
			       0, 4, false);
	if (!net_buf) {
		if (pdev->rx_buff_pool.netbufs_ring &&
		    qdf_atomic_read(&pdev->rx_buff_pool.refill_low_mem) &&
		    qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt))
			net_buf = htt_rx_buff_alloc(pdev);

		allocated = false; /* not a fresh allocation; taken from pool */
	}

	if (allocated || !qdf_atomic_read(&pdev->rx_buff_pool.fill_cnt))
		qdf_atomic_set(&pdev->rx_buff_pool.refill_low_mem, 0);

	return net_buf;
}
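
/*
 * Note on the fallback above: under normal memory conditions buffers come
 * straight from qdf_nbuf_alloc().  Only when that allocation fails, and the
 * refill-retry path has flagged refill_low_mem, does the ring fall back to
 * the pre-allocated pool; once a regular allocation succeeds again (or the
 * pool runs dry), the low-memory flag is cleared.
 */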

/**
 * htt_rx_ring_buff_free() - free the net buf or return it to the pool
 * @pdev: pointer to device
 * @netbuf: netbuf
 *
 * Return: none
 */
static void htt_rx_ring_buff_free(struct htt_pdev_t *pdev, qdf_nbuf_t netbuf)
{
	bool status = false;

	if (pdev->rx_buff_pool.netbufs_ring &&
	    QDF_NBUF_CB_RX_PACKET_BUFF_POOL(netbuf)) {
		int i;

		/* reset this netbuf before putting it back into the pool */
		qdf_nbuf_reset(netbuf, 0, 4);

		/* mark this netbuf as pool buf */
		QDF_NBUF_CB_RX_PACKET_BUFF_POOL(netbuf) = 1;

		qdf_spin_lock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
		for (i = 0; i < HTT_RX_PRE_ALLOC_POOL_SIZE; i++) {
			/* insert the netbuf into an empty slot of the pool */
			if (!pdev->rx_buff_pool.netbufs_ring[i]) {
				pdev->rx_buff_pool.netbufs_ring[i] = netbuf;
				qdf_atomic_inc(&pdev->rx_buff_pool.fill_cnt);
				status = true;    /* valid insertion */
				break;
			}
		}
		qdf_spin_unlock_bh(&pdev->rx_buff_pool.rx_buff_pool_lock);
	}
	if (!status)
		qdf_nbuf_free(netbuf);
}

/* full_reorder_offload case: this function is called with lock held */
static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
{
	int idx;
	QDF_STATUS status;
	struct htt_host_rx_desc_base *rx_desc;
	int filled = 0;
	int debt_served = 0;
	qdf_mem_info_t mem_map_table = {0};

	idx = *pdev->rx_ring.alloc_idx.vaddr;

	if ((idx < 0) || (idx > pdev->rx_ring.size_mask) ||
	    (num > pdev->rx_ring.size)) {
		QDF_TRACE(QDF_MODULE_ID_HTT,
			  QDF_TRACE_LEVEL_ERROR,
			  "%s:rx refill failed!", __func__);
		return filled;
	}

moretofill:
	while (num > 0) {
		qdf_dma_addr_t paddr, paddr_marked;
		qdf_nbuf_t rx_netbuf;
		int headroom;

		rx_netbuf = htt_rx_ring_buf_attach(pdev);
		if (!rx_netbuf) {
			qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
			/*
			 * Failed to fill it to the desired level -
			 * we'll start a timer and try again next time.
			 * As long as enough buffers are left in the ring for
			 * another A-MPDU rx, no special recovery is needed.
			 */
#ifdef DEBUG_DMA_DONE
			pdev->rx_ring.dbg_refill_cnt++;
#endif
			pdev->refill_retry_timer_starts++;
			qdf_timer_start(
				&pdev->rx_ring.refill_retry_timer,
				HTT_RX_RING_REFILL_RETRY_TIME_MS);
			goto update_alloc_idx;
		}

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = htt_rx_desc(rx_netbuf);
		*(uint32_t *)&rx_desc->attention = 0;

#ifdef DEBUG_DMA_DONE
		*(uint32_t *)&rx_desc->msdu_end = 1;

#define MAGIC_PATTERN 0xDEADBEEF
		*(uint32_t *)&rx_desc->msdu_start = MAGIC_PATTERN;

		/*
		 * Ensure that the attention word is cleared and msdu_end is
		 * set before calling dma_map.
		 */
		smp_mb();
#endif
		/*
		 * Adjust qdf_nbuf_data to point to the location in the buffer
		 * where the rx descriptor will be filled in.
		 */
		headroom = qdf_nbuf_data(rx_netbuf) - (uint8_t *)rx_desc;
		qdf_nbuf_push_head(rx_netbuf, headroom);

#ifdef DEBUG_DMA_DONE
		status = qdf_nbuf_map(pdev->osdev, rx_netbuf,
				      QDF_DMA_BIDIRECTIONAL);
#else
		status = qdf_nbuf_map(pdev->osdev, rx_netbuf,
				      QDF_DMA_FROM_DEVICE);
#endif
		if (status != QDF_STATUS_SUCCESS) {
			htt_rx_ring_buff_free(pdev, rx_netbuf);
			goto update_alloc_idx;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
		paddr_marked = htt_rx_paddr_mark_high_bits(paddr);
		if (pdev->cfg.is_full_reorder_offload) {
			if (qdf_unlikely(htt_rx_hash_list_insert(
					pdev, paddr_marked, rx_netbuf))) {
				QDF_TRACE(QDF_MODULE_ID_HTT,
					  QDF_TRACE_LEVEL_ERROR,
					  "%s: hash insert failed!", __func__);
#ifdef DEBUG_DMA_DONE
				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       QDF_DMA_BIDIRECTIONAL);
#else
				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
					       QDF_DMA_FROM_DEVICE);
#endif
				htt_rx_ring_buff_free(pdev, rx_netbuf);

				goto update_alloc_idx;
			}
			htt_rx_dbg_rxbuf_set(pdev, paddr_marked, rx_netbuf);
		} else {
			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
		}

		/* Caller already protected this function with refill_lock */
		if (qdf_nbuf_is_rx_ipa_smmu_map(rx_netbuf)) {
			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
						 paddr, HTT_RX_BUF_SIZE);
			qdf_assert_always(
				!cds_smmu_map_unmap(true, 1, &mem_map_table));
		}

		pdev->rx_ring.buf.paddrs_ring[idx] = paddr_marked;
		qdf_atomic_inc(&pdev->rx_ring.fill_cnt);

		num--;
		idx++;
		filled++;
		idx &= pdev->rx_ring.size_mask;
	}

	if (debt_served < qdf_atomic_read(&pdev->rx_ring.refill_debt)) {
		num = qdf_atomic_read(&pdev->rx_ring.refill_debt) - debt_served;
		debt_served += num;
		goto moretofill;
	}

update_alloc_idx:
	/*
	 * Make sure the alloc index write is visible before the FW polls the
	 * remote ring write index, since the compiler can reorder instructions
	 * when optimizing.
	 */
	qdf_mb();
	*pdev->rx_ring.alloc_idx.vaddr = idx;
	htt_rx_dbg_rxbuf_indupd(pdev, idx);

	return filled;
}

static int htt_rx_ring_size(struct htt_pdev_t *pdev)
{
	int size;
	QDF_STATUS status;
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	bool enable_2x2 = true;

	/*
	 * It is expected that the host CPU will typically be able to service
	 * the rx indication from one A-MPDU before the rx indication from
	 * the subsequent A-MPDU happens, roughly 1-2 ms later.
	 * However, the rx ring should be sized very conservatively, to
	 * accommodate the worst reasonable delay before the host CPU services
	 * a rx indication interrupt.
	 * The rx ring need not be kept full of empty buffers.  In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring and adjust the level to which the ring is filled with
	 * empty buffers, to meet the desired low-water mark.
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use.
	 * Thus, the ring itself should be sized very conservatively, while
	 * the degree to which the ring is filled with empty buffers should
	 * be sized moderately conservatively.
	 */
	size =
		ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
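
	/*
	 * Illustrative arithmetic (hypothetical values): with a configured
	 * max throughput of 800 Mbps, HTT_RX_AVG_FRM_BYTES of 1000 and
	 * HTT_RX_HOST_LATENCY_MAX_MS of 20, this evaluates to
	 *     800 * 1000 / (8 * 1000) * 20 = 2000
	 * buffers, before the clamping below and the rounding up to a
	 * power of two.
	 */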

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;
	else if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = qdf_get_pwr2(size);

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
		  "Unable to get 2x2 cap soc is NULL ring size:%u selected ", size);
		return size;
	}

	status = wlan_mlme_get_vht_enable2x2((void *)soc->psoc, &enable_2x2);
	if (QDF_IS_STATUS_SUCCESS(status))
		size = (enable_2x2) ? size : QDF_MIN(size, HTT_RX_RING_SIZE_1x1);
	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
		  "HTT RX refill ring size:%u selected for %s mode", size, enable_2x2 ? "2x2" : "1x1");

	return size;
}

static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
{
	int size;

	size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
		(8 * HTT_RX_AVG_FRM_BYTES) *
		HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	size = qdf_get_pwr2(size);
	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= pdev->rx_ring.size)
		size = pdev->rx_ring.size - 1;

	return size;
}

static void htt_rx_ring_refill_retry(void *arg)
{
	htt_pdev_handle pdev = (htt_pdev_handle)arg;
	int filled = 0;
	int num;

	pdev->refill_retry_timer_calls++;
	qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);

	num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
	qdf_atomic_sub(num, &pdev->rx_ring.refill_debt);

	qdf_atomic_set(&pdev->rx_buff_pool.refill_low_mem, 1);

	filled = htt_rx_ring_fill_n(pdev, num);

	if (filled > num) {
		/* we served ourselves and some other debt */
		/* sub is safer than = 0 */
		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
	} else if (num == filled) { /* nothing to be done */
	} else {
		qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
		/* we could not fill all, timer must have been started */
		pdev->refill_retry_timer_doubles++;
	}
	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
}
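
/*
 * Illustrative debt accounting (hypothetical values): if refill_debt is 32
 * when the retry timer fires, 32 is subtracted from the debt counter and
 * htt_rx_ring_fill_n() is asked for 32 buffers.  If only 20 can be filled,
 * the remaining 12 are added back to refill_debt, and the retry timer
 * (re-armed inside htt_rx_ring_fill_n() on allocation failure) tries again
 * after HTT_RX_RING_REFILL_RETRY_TIME_MS.
 */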

/*--- rx descriptor field access functions ----------------------------------*/
/*
 * These functions need to use bit masks and shifts to extract fields
 * from the rx descriptors, rather than directly using the bitfields.
 * For example, use
 *     (desc & FIELD_MASK) >> FIELD_LSB
 * rather than
 *     desc.field
 * This allows the functions to work correctly on either little-endian
 * machines (no endianness conversion needed) or big-endian machines
 * (endianness conversion provided automatically by the HW DMA's
 * byte-swizzling).
 */
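
/*
 * Concretely, the "encrypted" flag is read below as
 *     (*(uint32_t *)&rx_desc->mpdu_start & RX_MPDU_START_0_ENCRYPTED_MASK)
 *         >> RX_MPDU_START_0_ENCRYPTED_LSB
 * rather than as rx_desc->mpdu_start.encrypted.
 */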

#ifdef CHECKSUM_OFFLOAD
static inline void
htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
			   struct htt_host_rx_desc_base *rx_desc)
{
#define MAX_IP_VER          2
#define MAX_PROTO_VAL       4
	struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
	unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);

	/*
	 * HW supports TCP & UDP checksum offload for ipv4 and ipv6
	 */
	static const qdf_nbuf_l4_rx_cksum_type_t
		cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
		{
			/* non-fragmented IP packet */
			/* non TCP/UDP packet */
			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
			/* TCP packet */
			{QDF_NBUF_RX_CKSUM_TCP, QDF_NBUF_RX_CKSUM_TCPIPV6},
			/* UDP packet */
			{QDF_NBUF_RX_CKSUM_UDP, QDF_NBUF_RX_CKSUM_UDPIPV6},
			/* invalid packet type */
			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
		},
		{
			/* fragmented IP packet */
			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
		}
	};

	qdf_nbuf_rx_cksum_t cksum = {
		cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
		QDF_NBUF_RX_CKSUM_NONE,
		0
	};

	if (cksum.l4_type !=
	    (qdf_nbuf_l4_rx_cksum_type_t)QDF_NBUF_RX_CKSUM_NONE) {
		cksum.l4_result =
			((*(uint32_t *)&rx_desc->attention) &
			 RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
			QDF_NBUF_RX_CKSUM_NONE :
			QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
	}
	qdf_nbuf_set_rx_cksum(msdu, &cksum);
#undef MAX_IP_VER
#undef MAX_PROTO_VAL
}
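
/*
 * Illustrative lookup (hypothetical frame): for a non-fragmented IPv4 TCP
 * MSDU the descriptor reports ip_frag = 0, tcp_proto = 1, udp_proto = 0 and
 * ipv6_proto = 0, so proto = 1 and cksum_table[0][1][0] yields
 * QDF_NBUF_RX_CKSUM_TCP; the attention word's checksum-fail bit then decides
 * whether l4_result becomes QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY or
 * QDF_NBUF_RX_CKSUM_NONE.
 */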

#else

static inline
void htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
				struct htt_host_rx_desc_base *rx_desc)
{
}

#endif

static void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu)
{
	return htt_rx_desc(msdu);
}

static bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	return (((*((uint32_t *)&rx_desc->mpdu_start)) &
		 RX_MPDU_START_0_ENCRYPTED_MASK) >>
		RX_MPDU_START_0_ENCRYPTED_LSB) ? true : false;
}

static
bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	return false;
}

static bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
				       struct ol_txrx_peer_t *peer,
				       void *mpdu_desc,
				       uint16_t *primary_chan_center_freq_mhz,
				       uint16_t *contig_chan1_center_freq_mhz,
				       uint16_t *contig_chan2_center_freq_mhz,
				       uint8_t *phy_mode)
{
	if (primary_chan_center_freq_mhz)
		*primary_chan_center_freq_mhz = 0;
	if (contig_chan1_center_freq_mhz)
		*contig_chan1_center_freq_mhz = 0;
	if (contig_chan2_center_freq_mhz)
		*contig_chan2_center_freq_mhz = 0;
	if (phy_mode)
		*phy_mode = 0;
	return false;
}

static bool
htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return (bool)
		(((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
		  RX_MSDU_END_4_FIRST_MSDU_MASK) >>
		 RX_MSDU_END_4_FIRST_MSDU_LSB);
}

static bool
htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
			   uint8_t *key_id)
{
	struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
						mpdu_desc;

	if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
		return false;

	*key_id = ((*(((uint32_t *)&rx_desc->msdu_end) + 1)) &
		   (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
		    RX_MSDU_END_1_KEY_ID_OCT_LSB));

	return true;
}

/**
 * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
 *                               for the Low Latency driver
 * @pdev:                          Handle (pointer) to HTT pdev.
 * @mpdu_desc:                     Void pointer to the Rx descriptor for MPDU
 *                                 before the beginning of the payload.
 *
 * This function returns the retry bit of the 802.11 header for the
 * provided rx MPDU descriptor.
 *
 * Return:        boolean -- true if retry is set, false otherwise
 */
static bool
htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	return
		(bool)(((*((uint32_t *)&rx_desc->mpdu_start)) &
		RX_MPDU_START_0_RETRY_MASK) >>
		RX_MPDU_START_0_RETRY_LSB);
}

static uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev,
					    void *mpdu_desc,
					    bool update_seq_num)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	return
		(uint16_t)(((*((uint32_t *)&rx_desc->mpdu_start)) &
			     RX_MPDU_START_0_SEQ_NUM_MASK) >>
			    RX_MPDU_START_0_SEQ_NUM_LSB);
}

static void
htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
		       void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	switch (pn_len_bits) {
	case 24:
		/* bits 23:0 */
		pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
		break;
	case 48:
		/* bits 31:0 */
		pn->pn48 = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 */
		pn->pn48 |= ((uint64_t)
			     ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
			      & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		break;
	case 128:
		/* bits 31:0 */
		pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
		/* bits 47:32 */
		pn->pn128[0] |=
			((uint64_t)((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
				     & RX_MPDU_START_2_PN_47_32_MASK))
			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
		/* bits 63:48 */
		pn->pn128[0] |=
			((uint64_t)((*(((uint32_t *)&rx_desc->msdu_end) + 2))
				     & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
			<< (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
		/* bits 95:64 */
		pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
		/* bits 127:96 */
		pn->pn128[1] |=
			((uint64_t)rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
			  "Error: invalid length spec (%d bits) for PN",
			  pn_len_bits);
	};
}
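
/*
 * Illustrative 48-bit PN assembly (hypothetical values): if pn_31_0 holds
 * 0x89ABCDEF and bits 47:32 of the third mpdu_start dword hold 0x0123, the
 * shift above places that field at bit 32, giving a pn48 of 0x012389ABCDEF.
 */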

/**
 * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
 *                             for the Low Latency driver
 * @pdev:                        Handle (pointer) to HTT pdev.
 * @mpdu_desc:                   Void pointer to the Rx descriptor for the MPDU
 *                               before the beginning of the payload.
 *
 * This function returns the TID set in the 802.11 QoS Control field of the
 * MPDU header, by looking at the mpdu_start of the Rx descriptor. The Rx
 * descriptor gets a copy of the TID from the MAC.
 *
 * Return:        Actual TID set in the packet header.
 */
static uint8_t
htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)mpdu_desc;

	return
		(uint8_t)(((*(((uint32_t *)&rx_desc->mpdu_start) + 2)) &
		RX_MPDU_START_2_TID_MASK) >>
		RX_MPDU_START_2_TID_LSB);
}

static bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev,
					       void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return (bool)
		(((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
		  RX_MSDU_END_4_LAST_MSDU_MASK) >> RX_MSDU_END_4_LAST_MSDU_LSB);
}

static int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev,
					      void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	/*
	 * HW rx desc: the mcast_bcast flag is only valid
	 * if first_msdu is set
	 */
	return ((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
		RX_MSDU_END_4_FIRST_MSDU_MASK) >> RX_MSDU_END_4_FIRST_MSDU_LSB;
}

static bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return ((*((uint32_t *)&rx_desc->attention)) &
		RX_ATTENTION_0_MCAST_BCAST_MASK)
		>> RX_ATTENTION_0_MCAST_BCAST_LSB;
}

static int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
{
	struct htt_host_rx_desc_base *rx_desc =
		(struct htt_host_rx_desc_base *)msdu_desc;
	return ((*((uint32_t *)&rx_desc->attention)) &
		 RX_ATTENTION_0_FRAGMENT_MASK) >> RX_ATTENTION_0_FRAGMENT_LSB;
}

static inline int
htt_rx_offload_msdu_cnt_ll(htt_pdev_handle pdev)
{
	return htt_rx_ring_elems(pdev);
}

static int
htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
			   qdf_nbuf_t offload_deliver_msg,
			   int *vdev_id,
			   int *peer_id,
			   int *tid,
			   uint8_t *fw_desc,
			   qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
{
	qdf_nbuf_t buf;
	uint32_t *msdu_hdr, msdu_len;

	*head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);

	if (qdf_unlikely(!buf)) {
		qdf_print("netbuf pop failed!");
		return 1;
	}

	/* Fake read mpdu_desc to keep desc ptr in sync */
	htt_rx_mpdu_desc_list_next(pdev, NULL);
	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
#else
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
#endif
	msdu_hdr = (uint32_t *)qdf_nbuf_data(buf);

	/* First dword */
	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);

	/* Second dword */
	msdu_hdr++;
	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);

	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
	qdf_nbuf_set_pktlen(buf, msdu_len);
	return 0;
}

int
htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
				 uint32_t *msg_word,
				 int msdu_iter,
				 int *vdev_id,
				 int *peer_id,
				 int *tid,
				 uint8_t *fw_desc,
				 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
{
	qdf_nbuf_t buf;
	uint32_t *msdu_hdr, msdu_len;
	uint32_t *curr_msdu;
	qdf_dma_addr_t paddr;

	curr_msdu =
		msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
	paddr = htt_rx_in_ord_paddr_get(curr_msdu);
	*head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);

	if (qdf_unlikely(!buf)) {
		qdf_print("netbuf pop failed!");
		return 1;
	}
	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
#else
	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
#endif

	if (pdev->cfg.is_first_wakeup_packet)
		htt_get_first_packet_after_wow_wakeup(
			msg_word + NEXT_FIELD_OFFSET_IN32, buf);

	msdu_hdr = (uint32_t *)qdf_nbuf_data(buf);

	/* First dword */
	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);

	/* Second dword */
	msdu_hdr++;
	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);

	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
	qdf_nbuf_set_pktlen(buf, msdu_len);
	return 0;
}

#ifdef WLAN_FULL_REORDER_OFFLOAD

/* Number of buckets in the hash table */
#define RX_NUM_HASH_BUCKETS 1024        /* This should always be a power of 2 */
#define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)

/* Number of hash entries allocated per bucket */
#define RX_ENTRIES_SIZE 10

#define RX_HASH_FUNCTION(a) \
	((((a) >> 14) ^ ((a) >> 4)) & RX_NUM_HASH_BUCKETS_MASK)
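
/*
 * Illustrative bucket selection (hypothetical address): for a ring buffer
 * physical address of 0xDEAD4000,
 *     (0xDEAD4000 >> 14) ^ (0xDEAD4000 >> 4) = 0x37AB5 ^ 0xDEAD400 = 0xDE9AEB5,
 * and masking with RX_NUM_HASH_BUCKETS_MASK (0x3FF) keeps only the low
 * 10 bits of that XOR, selecting bucket 0x2B5.
 */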
1038*5113495bSYour Name 
1039*5113495bSYour Name #ifdef RX_HASH_DEBUG_LOG
1040*5113495bSYour Name #define RX_HASH_LOG(x) x
1041*5113495bSYour Name #else
1042*5113495bSYour Name #define RX_HASH_LOG(x)          /* no-op */
1043*5113495bSYour Name #endif
1044*5113495bSYour Name 
1045*5113495bSYour Name /* Return values: 1 - success, 0 - failure */
1046*5113495bSYour Name #define RX_DESC_DISCARD_IS_SET ((*((u_int8_t *)&rx_desc->fw_desc.u.val)) & \
1047*5113495bSYour Name 							FW_RX_DESC_DISCARD_M)
1048*5113495bSYour Name #define RX_DESC_MIC_ERR_IS_SET ((*((u_int8_t *)&rx_desc->fw_desc.u.val)) & \
1049*5113495bSYour Name 							FW_RX_DESC_ANY_ERR_M)
1050*5113495bSYour Name 
1051*5113495bSYour Name #define RX_RING_REFILL_DEBT_MAX 128
1052*5113495bSYour Name 
1053*5113495bSYour Name /* Initializes the circular linked list */
htt_list_init(struct htt_list_node * head)1054*5113495bSYour Name static inline void htt_list_init(struct htt_list_node *head)
1055*5113495bSYour Name {
1056*5113495bSYour Name 	head->prev = head;
1057*5113495bSYour Name 	head->next = head;
1058*5113495bSYour Name }
1059*5113495bSYour Name 
1060*5113495bSYour Name /* Adds entry to the end of the linked list */
1061*5113495bSYour Name static inline void htt_list_add_tail(struct htt_list_node *head,
1062*5113495bSYour Name 				     struct htt_list_node *node)
1063*5113495bSYour Name {
1064*5113495bSYour Name 	head->prev->next = node;
1065*5113495bSYour Name 	node->prev = head->prev;
1066*5113495bSYour Name 	node->next = head;
1067*5113495bSYour Name 	head->prev = node;
1068*5113495bSYour Name }
1069*5113495bSYour Name 
1070*5113495bSYour Name /* Removes the entry corresponding to the input node from the linked list */
1071*5113495bSYour Name static inline void htt_list_remove(struct htt_list_node *node)
1072*5113495bSYour Name {
1073*5113495bSYour Name 	node->prev->next = node->next;
1074*5113495bSYour Name 	node->next->prev = node->prev;
1075*5113495bSYour Name }
1076*5113495bSYour Name 
1077*5113495bSYour Name /* Helper macro to iterate through the linked list */
1078*5113495bSYour Name #define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next;		\
1079*5113495bSYour Name 					   (iter) != (head);		\
1080*5113495bSYour Name 					   (iter) = (iter)->next)	\
1081*5113495bSYour Name 
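/*
 * Illustrative sketch (not part of the driver) of how the list helpers
 * above fit together; bucket_head, entry and iter are hypothetical names:
 *
 *   struct htt_list_node bucket_head, entry, *iter;
 *
 *   htt_list_init(&bucket_head);             // empty list: head points to itself
 *   htt_list_add_tail(&bucket_head, &entry); // append just before the head
 *   HTT_LIST_ITER_FWD(iter, &bucket_head) {
 *           // visits every node except the head sentinel
 *   }
 *   htt_list_remove(&entry);                 // O(1) unlink, head not needed
 */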
1082*5113495bSYour Name #ifdef RX_HASH_DEBUG
1083*5113495bSYour Name /* Hash cookie related macros */
1084*5113495bSYour Name #define HTT_RX_HASH_COOKIE 0xDEED
1085*5113495bSYour Name 
1086*5113495bSYour Name #define HTT_RX_HASH_COOKIE_SET(hash_element) \
1087*5113495bSYour Name 	((hash_element)->cookie = HTT_RX_HASH_COOKIE)
1088*5113495bSYour Name 
1089*5113495bSYour Name #define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
1090*5113495bSYour Name 	HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
1091*5113495bSYour Name 
1092*5113495bSYour Name /* Hash count related macros */
1093*5113495bSYour Name #define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
1094*5113495bSYour Name 	((hash_bucket)->count++)
1095*5113495bSYour Name 
1096*5113495bSYour Name #define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
1097*5113495bSYour Name 	((hash_bucket)->count--)
1098*5113495bSYour Name 
1099*5113495bSYour Name #define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket)->count = 0)
1100*5113495bSYour Name 
1101*5113495bSYour Name #define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
1102*5113495bSYour Name 	RX_HASH_LOG(qdf_print(" count %d\n", (hash_bucket)->count))
1103*5113495bSYour Name #else                           /* RX_HASH_DEBUG */
1104*5113495bSYour Name /* Hash cookie related macros */
1105*5113495bSYour Name #define HTT_RX_HASH_COOKIE_SET(hash_element)    /* no-op */
1106*5113495bSYour Name #define HTT_RX_HASH_COOKIE_CHECK(hash_element)  /* no-op */
1107*5113495bSYour Name /* Hash count related macros */
1108*5113495bSYour Name #define HTT_RX_HASH_COUNT_INCR(hash_bucket)     /* no-op */
1109*5113495bSYour Name #define HTT_RX_HASH_COUNT_DECR(hash_bucket)     /* no-op */
1110*5113495bSYour Name #define HTT_RX_HASH_COUNT_PRINT(hash_bucket)    /* no-op */
1111*5113495bSYour Name #define HTT_RX_HASH_COUNT_RESET(hash_bucket)    /* no-op */
1112*5113495bSYour Name #endif /* RX_HASH_DEBUG */
1113*5113495bSYour Name 
1114*5113495bSYour Name /*
1115*5113495bSYour Name  * Inserts the given "physical address - network buffer" pair into the
1116*5113495bSYour Name  * hash table for the given pdev. This function will do the following:
1117*5113495bSYour Name  * 1. Determine which bucket to insert the pair into
1118*5113495bSYour Name  * 2. First try to allocate the hash entry for this pair from the pre-allocated
1119*5113495bSYour Name  *    entries list
1120*5113495bSYour Name  * 3. If there are no more entries in the pre-allocated entries list, allocate
1121*5113495bSYour Name  *    the hash entry from the hash memory pool
1122*5113495bSYour Name  * Note: this function is not thread-safe
1123*5113495bSYour Name  * Returns 0 - success, 1 - failure
1124*5113495bSYour Name  */
1125*5113495bSYour Name int
1126*5113495bSYour Name htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
1127*5113495bSYour Name 			qdf_dma_addr_t paddr,
1128*5113495bSYour Name 			qdf_nbuf_t netbuf)
1129*5113495bSYour Name {
1130*5113495bSYour Name 	int i;
1131*5113495bSYour Name 	int rc = 0;
1132*5113495bSYour Name 	struct htt_rx_hash_entry *hash_element = NULL;
1133*5113495bSYour Name 
1134*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1135*5113495bSYour Name 
1136*5113495bSYour Name 	/* get rid of the marking bits if they are available */
1137*5113495bSYour Name 	paddr = htt_paddr_trim_to_37(paddr);
1138*5113495bSYour Name 
1139*5113495bSYour Name 	i = RX_HASH_FUNCTION(paddr);
1140*5113495bSYour Name 
1141*5113495bSYour Name 	/* Check if there are any entries in the pre-allocated free list */
1142*5113495bSYour Name 	if (pdev->rx_ring.hash_table[i]->freepool.next !=
1143*5113495bSYour Name 	    &pdev->rx_ring.hash_table[i]->freepool) {
1144*5113495bSYour Name 		hash_element =
1145*5113495bSYour Name 			(struct htt_rx_hash_entry *)(
1146*5113495bSYour Name 				(char *)
1147*5113495bSYour Name 				pdev->rx_ring.hash_table[i]->freepool.next -
1148*5113495bSYour Name 				pdev->rx_ring.listnode_offset);
1149*5113495bSYour Name 		if (qdf_unlikely(!hash_element)) {
1150*5113495bSYour Name 			HTT_ASSERT_ALWAYS(0);
1151*5113495bSYour Name 			rc = 1;
1152*5113495bSYour Name 			goto hli_end;
1153*5113495bSYour Name 		}
1154*5113495bSYour Name 
1155*5113495bSYour Name 		htt_list_remove(pdev->rx_ring.hash_table[i]->freepool.next);
1156*5113495bSYour Name 	} else {
1157*5113495bSYour Name 		hash_element = qdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
1158*5113495bSYour Name 		if (qdf_unlikely(!hash_element)) {
1159*5113495bSYour Name 			HTT_ASSERT_ALWAYS(0);
1160*5113495bSYour Name 			rc = 1;
1161*5113495bSYour Name 			goto hli_end;
1162*5113495bSYour Name 		}
1163*5113495bSYour Name 		hash_element->fromlist = 0;
1164*5113495bSYour Name 	}
1165*5113495bSYour Name 
1166*5113495bSYour Name 	hash_element->netbuf = netbuf;
1167*5113495bSYour Name 	hash_element->paddr = paddr;
1168*5113495bSYour Name 	HTT_RX_HASH_COOKIE_SET(hash_element);
1169*5113495bSYour Name 
1170*5113495bSYour Name 	htt_list_add_tail(&pdev->rx_ring.hash_table[i]->listhead,
1171*5113495bSYour Name 			  &hash_element->listnode);
1172*5113495bSYour Name 
1173*5113495bSYour Name 	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%x netbuf %pK bucket %d\n",
1174*5113495bSYour Name 			      paddr, netbuf, (int)i));
1175*5113495bSYour Name 
1176*5113495bSYour Name 	if (htt_rx_ring_smmu_mapped(pdev)) {
1177*5113495bSYour Name 		if (qdf_unlikely(qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
1178*5113495bSYour Name 			qdf_err("Already smmu mapped, nbuf: %pK",
1179*5113495bSYour Name 				netbuf);
1180*5113495bSYour Name 			qdf_assert_always(0);
1181*5113495bSYour Name 		}
1182*5113495bSYour Name 		qdf_nbuf_set_rx_ipa_smmu_map(netbuf, true);
1183*5113495bSYour Name 	}
1184*5113495bSYour Name 
1185*5113495bSYour Name 	HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
1186*5113495bSYour Name 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
1187*5113495bSYour Name 
1188*5113495bSYour Name hli_end:
1189*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1190*5113495bSYour Name 	return rc;
1191*5113495bSYour Name }
1192*5113495bSYour Name 
1193*5113495bSYour Name /*
1194*5113495bSYour Name  * Given a physical address this function will find the corresponding network
1195*5113495bSYour Name  *  buffer from the hash table.
1196*5113495bSYour Name  *  paddr is already stripped off of higher marking bits.
1197*5113495bSYour Name  */
1198*5113495bSYour Name qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev,
1199*5113495bSYour Name 				   qdf_dma_addr_t     paddr)
1200*5113495bSYour Name {
1201*5113495bSYour Name 	uint32_t i;
1202*5113495bSYour Name 	struct htt_list_node *list_iter = NULL;
1203*5113495bSYour Name 	qdf_nbuf_t netbuf = NULL;
1204*5113495bSYour Name 	struct htt_rx_hash_entry *hash_entry;
1205*5113495bSYour Name 
1206*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1207*5113495bSYour Name 
1208*5113495bSYour Name 	if (!pdev->rx_ring.hash_table) {
1209*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1210*5113495bSYour Name 		return NULL;
1211*5113495bSYour Name 	}
1212*5113495bSYour Name 
1213*5113495bSYour Name 	i = RX_HASH_FUNCTION(paddr);
1214*5113495bSYour Name 
1215*5113495bSYour Name 	HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i]->listhead) {
1216*5113495bSYour Name 		hash_entry = (struct htt_rx_hash_entry *)
1217*5113495bSYour Name 			     ((char *)list_iter -
1218*5113495bSYour Name 			      pdev->rx_ring.listnode_offset);
1219*5113495bSYour Name 
1220*5113495bSYour Name 		HTT_RX_HASH_COOKIE_CHECK(hash_entry);
1221*5113495bSYour Name 
1222*5113495bSYour Name 		if (hash_entry->paddr == paddr) {
1223*5113495bSYour Name 			/* Found the entry corresponding to paddr */
1224*5113495bSYour Name 			netbuf = hash_entry->netbuf;
1225*5113495bSYour Name 			/* Set netbuf to NULL so we can trace whether a freed
1226*5113495bSYour Name 			 * entry is getting unmapped again in hash deinit.
1227*5113495bSYour Name 			 */
1228*5113495bSYour Name 			hash_entry->netbuf = NULL;
1229*5113495bSYour Name 			htt_list_remove(&hash_entry->listnode);
1230*5113495bSYour Name 			HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
1231*5113495bSYour Name 			/*
1232*5113495bSYour Name 			 * if the rx entry is from the pre-allocated list,
1233*5113495bSYour Name 			 * return it
1234*5113495bSYour Name 			 */
1235*5113495bSYour Name 			if (hash_entry->fromlist)
1236*5113495bSYour Name 				htt_list_add_tail(
1237*5113495bSYour Name 					&pdev->rx_ring.hash_table[i]->freepool,
1238*5113495bSYour Name 					&hash_entry->listnode);
1239*5113495bSYour Name 			else
1240*5113495bSYour Name 				qdf_mem_free(hash_entry);
1241*5113495bSYour Name 
1242*5113495bSYour Name 			htt_rx_dbg_rxbuf_reset(pdev, netbuf);
1243*5113495bSYour Name 			break;
1244*5113495bSYour Name 		}
1245*5113495bSYour Name 	}
1246*5113495bSYour Name 
1247*5113495bSYour Name 	if (netbuf && htt_rx_ring_smmu_mapped(pdev)) {
1248*5113495bSYour Name 		if (qdf_unlikely(!qdf_nbuf_is_rx_ipa_smmu_map(netbuf))) {
1249*5113495bSYour Name 			qdf_err("smmu not mapped nbuf: %pK", netbuf);
1250*5113495bSYour Name 			qdf_assert_always(0);
1251*5113495bSYour Name 		}
1252*5113495bSYour Name 	}
1253*5113495bSYour Name 
1254*5113495bSYour Name 	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%llx, netbuf %pK, bucket %d\n",
1255*5113495bSYour Name 			      (unsigned long long)paddr, netbuf, (int)i));
1256*5113495bSYour Name 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
1257*5113495bSYour Name 
1258*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1259*5113495bSYour Name 
1260*5113495bSYour Name 	if (!netbuf) {
1261*5113495bSYour Name 		qdf_print("rx hash: no entry found for %llx!\n",
1262*5113495bSYour Name 			  (unsigned long long)paddr);
1263*5113495bSYour Name 		cds_trigger_recovery(QDF_RX_HASH_NO_ENTRY_FOUND);
1264*5113495bSYour Name 	}
1265*5113495bSYour Name 
1266*5113495bSYour Name 	return netbuf;
1267*5113495bSYour Name }
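/*
 * Typical lifecycle, sketched here for orientation (the fill path,
 * htt_rx_ring_fill_n(), lives outside this excerpt): when a buffer is
 * posted to the rx ring its physical address is recorded via
 * htt_rx_hash_list_insert(); when the target later reports that address in
 * an in-order indication, it is translated back to the netbuf with
 * htt_rx_hash_list_lookup().
 */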
1268*5113495bSYour Name 
1269*5113495bSYour Name /*
1270*5113495bSYour Name  * Initialization function of the rx buffer hash table. This function will
1271*5113495bSYour Name  * allocate a hash table of a certain pre-determined size and initialize all
1272*5113495bSYour Name  * the elements
1273*5113495bSYour Name  */
1274*5113495bSYour Name static int htt_rx_hash_init(struct htt_pdev_t *pdev)
1275*5113495bSYour Name {
1276*5113495bSYour Name 	int i, j;
1277*5113495bSYour Name 	int rc = 0;
1278*5113495bSYour Name 	void *allocation;
1279*5113495bSYour Name 
1280*5113495bSYour Name 	HTT_ASSERT2(QDF_IS_PWR2(RX_NUM_HASH_BUCKETS));
1281*5113495bSYour Name 
1282*5113495bSYour Name 	/* hash table is array of bucket pointers */
1283*5113495bSYour Name 	pdev->rx_ring.hash_table =
1284*5113495bSYour Name 		qdf_mem_malloc(RX_NUM_HASH_BUCKETS *
1285*5113495bSYour Name 			       sizeof(struct htt_rx_hash_bucket *));
1286*5113495bSYour Name 
1287*5113495bSYour Name 	if (!pdev->rx_ring.hash_table)
1288*5113495bSYour Name 		return 1;
1289*5113495bSYour Name 
1290*5113495bSYour Name 	qdf_spinlock_create(&pdev->rx_ring.rx_hash_lock);
1291*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1292*5113495bSYour Name 
1293*5113495bSYour Name 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
1294*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1295*5113495bSYour Name 		/* pre-allocate bucket and pool of entries for this bucket */
1296*5113495bSYour Name 		allocation = qdf_mem_malloc((sizeof(struct htt_rx_hash_bucket) +
1297*5113495bSYour Name 			(RX_ENTRIES_SIZE * sizeof(struct htt_rx_hash_entry))));
1298*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1299*5113495bSYour Name 		pdev->rx_ring.hash_table[i] = allocation;
1300*5113495bSYour Name 
1301*5113495bSYour Name 		HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);
1302*5113495bSYour Name 
1303*5113495bSYour Name 		/* initialize the hash table buckets */
1304*5113495bSYour Name 		htt_list_init(&pdev->rx_ring.hash_table[i]->listhead);
1305*5113495bSYour Name 
1306*5113495bSYour Name 		/* initialize the hash table free pool per bucket */
1307*5113495bSYour Name 		htt_list_init(&pdev->rx_ring.hash_table[i]->freepool);
1308*5113495bSYour Name 
1309*5113495bSYour Name 		/* pre-allocate a pool of entries for this bucket */
1310*5113495bSYour Name 		pdev->rx_ring.hash_table[i]->entries =
1311*5113495bSYour Name 			(struct htt_rx_hash_entry *)
1312*5113495bSYour Name 			((uint8_t *)pdev->rx_ring.hash_table[i] +
1313*5113495bSYour Name 			sizeof(struct htt_rx_hash_bucket));
1314*5113495bSYour Name 
1315*5113495bSYour Name 		if (!pdev->rx_ring.hash_table[i]->entries) {
1316*5113495bSYour Name 			qdf_print("rx hash bucket %d entries alloc failed\n",
1317*5113495bSYour Name 				  (int)i);
1318*5113495bSYour Name 			while (i) {
1319*5113495bSYour Name 				i--;
1320*5113495bSYour Name 				qdf_mem_free(pdev->rx_ring.hash_table[i]);
1321*5113495bSYour Name 			}
1322*5113495bSYour Name 			qdf_mem_free(pdev->rx_ring.hash_table);
1323*5113495bSYour Name 			pdev->rx_ring.hash_table = NULL;
1324*5113495bSYour Name 			rc = 1;
1325*5113495bSYour Name 			goto hi_end;
1326*5113495bSYour Name 		}
1327*5113495bSYour Name 
1328*5113495bSYour Name 		/* initialize the free list with pre-allocated entries */
1329*5113495bSYour Name 		for (j = 0; j < RX_ENTRIES_SIZE; j++) {
1330*5113495bSYour Name 			pdev->rx_ring.hash_table[i]->entries[j].fromlist = 1;
1331*5113495bSYour Name 			htt_list_add_tail(
1332*5113495bSYour Name 				&pdev->rx_ring.hash_table[i]->freepool,
1333*5113495bSYour Name 				&pdev->rx_ring.hash_table[i]->entries[j].
1334*5113495bSYour Name 				listnode);
1335*5113495bSYour Name 		}
1336*5113495bSYour Name 	}
1337*5113495bSYour Name 
1338*5113495bSYour Name 	pdev->rx_ring.listnode_offset =
1339*5113495bSYour Name 		qdf_offsetof(struct htt_rx_hash_entry, listnode);
1340*5113495bSYour Name hi_end:
1341*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1342*5113495bSYour Name 
1343*5113495bSYour Name 	return rc;
1344*5113495bSYour Name }
1345*5113495bSYour Name 
1346*5113495bSYour Name /* De-initialization function of the rx buffer hash table. This function
1347*5113495bSYour Name  * frees the hash table, including freeing all the pending rx buffers.
1348*5113495bSYour Name  */
1349*5113495bSYour Name static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
1350*5113495bSYour Name {
1351*5113495bSYour Name 	uint32_t i;
1352*5113495bSYour Name 	struct htt_rx_hash_entry *hash_entry;
1353*5113495bSYour Name 	struct htt_rx_hash_bucket **hash_table;
1354*5113495bSYour Name 	struct htt_list_node *list_iter = NULL;
1355*5113495bSYour Name 	qdf_mem_info_t mem_map_table = {0};
1356*5113495bSYour Name 	bool ipa_smmu = false;
1357*5113495bSYour Name 
1358*5113495bSYour Name 	if (!pdev->rx_ring.hash_table)
1359*5113495bSYour Name 		return;
1360*5113495bSYour Name 
1361*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
1362*5113495bSYour Name 	ipa_smmu = htt_rx_ring_smmu_mapped(pdev);
1363*5113495bSYour Name 	hash_table = pdev->rx_ring.hash_table;
1364*5113495bSYour Name 	pdev->rx_ring.hash_table = NULL;
1365*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
1366*5113495bSYour Name 
1367*5113495bSYour Name 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
1368*5113495bSYour Name 		/* Free the hash entries in hash bucket i */
1369*5113495bSYour Name 		list_iter = hash_table[i]->listhead.next;
1370*5113495bSYour Name 		while (list_iter != &hash_table[i]->listhead) {
1371*5113495bSYour Name 			hash_entry =
1372*5113495bSYour Name 				(struct htt_rx_hash_entry *)((char *)list_iter -
1373*5113495bSYour Name 							     pdev->rx_ring.
1374*5113495bSYour Name 							     listnode_offset);
1375*5113495bSYour Name 			if (hash_entry->netbuf) {
1376*5113495bSYour Name 				if (ipa_smmu) {
1377*5113495bSYour Name 					if (qdf_unlikely(
1378*5113495bSYour Name 						!qdf_nbuf_is_rx_ipa_smmu_map(
1379*5113495bSYour Name 							hash_entry->netbuf))) {
1380*5113495bSYour Name 						qdf_err("nbuf: %pK NOT mapped",
1381*5113495bSYour Name 							hash_entry->netbuf);
1382*5113495bSYour Name 						qdf_assert_always(0);
1383*5113495bSYour Name 					}
1384*5113495bSYour Name 					qdf_nbuf_set_rx_ipa_smmu_map(
1385*5113495bSYour Name 							hash_entry->netbuf,
1386*5113495bSYour Name 							false);
1387*5113495bSYour Name 					qdf_update_mem_map_table(pdev->osdev,
1388*5113495bSYour Name 						&mem_map_table,
1389*5113495bSYour Name 						QDF_NBUF_CB_PADDR(
1390*5113495bSYour Name 							hash_entry->netbuf),
1391*5113495bSYour Name 						HTT_RX_BUF_SIZE);
1392*5113495bSYour Name 
1393*5113495bSYour Name 					qdf_assert_always(
1394*5113495bSYour Name 						!cds_smmu_map_unmap(
1395*5113495bSYour Name 							false, 1,
1396*5113495bSYour Name 							&mem_map_table));
1397*5113495bSYour Name 				}
1398*5113495bSYour Name #ifdef DEBUG_DMA_DONE
1399*5113495bSYour Name 				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
1400*5113495bSYour Name 					       QDF_DMA_BIDIRECTIONAL);
1401*5113495bSYour Name #else
1402*5113495bSYour Name 				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
1403*5113495bSYour Name 					       QDF_DMA_FROM_DEVICE);
1404*5113495bSYour Name #endif
1405*5113495bSYour Name 				qdf_nbuf_free(hash_entry->netbuf);
1406*5113495bSYour Name 				hash_entry->paddr = 0;
1407*5113495bSYour Name 			}
1408*5113495bSYour Name 			list_iter = list_iter->next;
1409*5113495bSYour Name 
1410*5113495bSYour Name 			if (!hash_entry->fromlist)
1411*5113495bSYour Name 				qdf_mem_free(hash_entry);
1412*5113495bSYour Name 		}
1413*5113495bSYour Name 
1414*5113495bSYour Name 		qdf_mem_free(hash_table[i]);
1415*5113495bSYour Name 	}
1416*5113495bSYour Name 	qdf_mem_free(hash_table);
1417*5113495bSYour Name 
1418*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->rx_ring.rx_hash_lock);
1419*5113495bSYour Name }
1420*5113495bSYour Name 
1421*5113495bSYour Name int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num)
1422*5113495bSYour Name {
1423*5113495bSYour Name 	int filled = 0;
1424*5113495bSYour Name 
1425*5113495bSYour Name 	if (!qdf_spin_trylock_bh(&pdev->rx_ring.refill_lock)) {
1426*5113495bSYour Name 		if (qdf_atomic_read(&pdev->rx_ring.refill_debt)
1427*5113495bSYour Name 			 < RX_RING_REFILL_DEBT_MAX) {
1428*5113495bSYour Name 			qdf_atomic_add(num, &pdev->rx_ring.refill_debt);
1429*5113495bSYour Name 			pdev->rx_buff_debt_invoked++;
1430*5113495bSYour Name 			return filled; /* 0 */
1431*5113495bSYour Name 		}
1432*5113495bSYour Name 		/*
1433*5113495bSYour Name 		 * Otherwise:
1434*5113495bSYour Name 		 * the debt is already large, so it is better to let the lock
1435*5113495bSYour Name 		 * holder finish its work, then acquire the lock ourselves
1436*5113495bSYour Name 		 * and fill our own share.
1437*5113495bSYour Name 		 */
1438*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
1439*5113495bSYour Name 	}
1440*5113495bSYour Name 	pdev->rx_buff_fill_n_invoked++;
1441*5113495bSYour Name 
1442*5113495bSYour Name 	filled = htt_rx_ring_fill_n(pdev, num);
1443*5113495bSYour Name 
1444*5113495bSYour Name 	if (filled > num) {
1445*5113495bSYour Name 		/* we served ourselves and some other debt */
1446*5113495bSYour Name 		/* subtracting is safer than resetting to 0 */
1447*5113495bSYour Name 		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
1448*5113495bSYour Name 	} else {
1449*5113495bSYour Name 		qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
1450*5113495bSYour Name 	}
1451*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
1452*5113495bSYour Name 
1453*5113495bSYour Name 	return filled;
1454*5113495bSYour Name }
1455*5113495bSYour Name 
1456*5113495bSYour Name #if defined(WLAN_FEATURE_TSF_PLUS) && !defined(CONFIG_HL_SUPPORT)
1457*5113495bSYour Name /**
1458*5113495bSYour Name  * htt_rx_tail_msdu_timestamp() - update tail msdu tsf64 timestamp
1459*5113495bSYour Name  * @tail_rx_desc: pointer to tail msdu descriptor
1460*5113495bSYour Name  * @timestamp_rx_desc: pointer to timestamp msdu descriptor
1461*5113495bSYour Name  *
1462*5113495bSYour Name  * Return: none
1463*5113495bSYour Name  */
1464*5113495bSYour Name static inline void htt_rx_tail_msdu_timestamp(
1465*5113495bSYour Name 			struct htt_host_rx_desc_base *tail_rx_desc,
1466*5113495bSYour Name 			struct htt_host_rx_desc_base *timestamp_rx_desc)
1467*5113495bSYour Name {
1468*5113495bSYour Name 	if (tail_rx_desc) {
1469*5113495bSYour Name 		if (!timestamp_rx_desc) {
1470*5113495bSYour Name 			tail_rx_desc->ppdu_end.wb_timestamp_lower_32 = 0;
1471*5113495bSYour Name 			tail_rx_desc->ppdu_end.wb_timestamp_upper_32 = 0;
1472*5113495bSYour Name 		} else {
1473*5113495bSYour Name 			if (timestamp_rx_desc != tail_rx_desc) {
1474*5113495bSYour Name 				tail_rx_desc->ppdu_end.wb_timestamp_lower_32 =
1475*5113495bSYour Name 			timestamp_rx_desc->ppdu_end.wb_timestamp_lower_32;
1476*5113495bSYour Name 				tail_rx_desc->ppdu_end.wb_timestamp_upper_32 =
1477*5113495bSYour Name 			timestamp_rx_desc->ppdu_end.wb_timestamp_upper_32;
1478*5113495bSYour Name 			}
1479*5113495bSYour Name 		}
1480*5113495bSYour Name 	}
1481*5113495bSYour Name }
1482*5113495bSYour Name #else
1483*5113495bSYour Name static inline void htt_rx_tail_msdu_timestamp(
1484*5113495bSYour Name 			struct htt_host_rx_desc_base *tail_rx_desc,
1485*5113495bSYour Name 			struct htt_host_rx_desc_base *timestamp_rx_desc)
1486*5113495bSYour Name {
1487*5113495bSYour Name }
1488*5113495bSYour Name #endif
1489*5113495bSYour Name 
1490*5113495bSYour Name static int
1491*5113495bSYour Name htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
1492*5113495bSYour Name 				qdf_nbuf_t rx_ind_msg,
1493*5113495bSYour Name 				qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1494*5113495bSYour Name 				uint32_t *replenish_cnt)
1495*5113495bSYour Name {
1496*5113495bSYour Name 	qdf_nbuf_t msdu, next, prev = NULL;
1497*5113495bSYour Name 	uint8_t *rx_ind_data;
1498*5113495bSYour Name 	uint32_t *msg_word;
1499*5113495bSYour Name 	uint32_t rx_ctx_id;
1500*5113495bSYour Name 	unsigned int msdu_count = 0;
1501*5113495bSYour Name 	uint8_t offload_ind, frag_ind;
1502*5113495bSYour Name 	uint8_t peer_id;
1503*5113495bSYour Name 	struct htt_host_rx_desc_base *rx_desc = NULL;
1504*5113495bSYour Name 	enum qdf_dp_tx_rx_status status = QDF_TX_RX_STATUS_OK;
1505*5113495bSYour Name 	qdf_dma_addr_t paddr;
1506*5113495bSYour Name 	qdf_mem_info_t mem_map_table = {0};
1507*5113495bSYour Name 	int ret = 1;
1508*5113495bSYour Name 	struct htt_host_rx_desc_base *timestamp_rx_desc = NULL;
1509*5113495bSYour Name 
1510*5113495bSYour Name 	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
1511*5113495bSYour Name 
1512*5113495bSYour Name 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
1513*5113495bSYour Name 	rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(rx_ind_msg);
1514*5113495bSYour Name 	msg_word = (uint32_t *)rx_ind_data;
1515*5113495bSYour Name 	peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
1516*5113495bSYour Name 					*(u_int32_t *)rx_ind_data);
1517*5113495bSYour Name 
1518*5113495bSYour Name 	offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
1519*5113495bSYour Name 	frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
1520*5113495bSYour Name 
1521*5113495bSYour Name 	/* Get the total number of MSDUs */
1522*5113495bSYour Name 	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
1523*5113495bSYour Name 	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
1524*5113495bSYour Name 
1525*5113495bSYour Name 	ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
1526*5113495bSYour Name 	htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
1527*5113495bSYour Name 
1528*5113495bSYour Name 	msg_word =
1529*5113495bSYour Name 		(uint32_t *)(rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
1530*5113495bSYour Name 	if (offload_ind) {
1531*5113495bSYour Name 		ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
1532*5113495bSYour Name 							msg_word);
1533*5113495bSYour Name 		*head_msdu = *tail_msdu = NULL;
1534*5113495bSYour Name 		ret = 0;
1535*5113495bSYour Name 		goto end;
1536*5113495bSYour Name 	}
1537*5113495bSYour Name 
1538*5113495bSYour Name 	paddr = htt_rx_in_ord_paddr_get(msg_word);
1539*5113495bSYour Name 	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
1540*5113495bSYour Name 
1541*5113495bSYour Name 	if (qdf_unlikely(!msdu)) {
1542*5113495bSYour Name 		qdf_print("netbuf pop failed!");
1543*5113495bSYour Name 		*tail_msdu = NULL;
1544*5113495bSYour Name 		pdev->rx_ring.pop_fail_cnt++;
1545*5113495bSYour Name 		ret = 0;
1546*5113495bSYour Name 		goto end;
1547*5113495bSYour Name 	}
1548*5113495bSYour Name 
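	/*
	 * Walk the per-MSDU address entries in the indication, popping the
	 * corresponding netbufs and linking them into a single chain
	 * returned through head_msdu/tail_msdu.
	 */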
1549*5113495bSYour Name 	while (msdu_count > 0) {
1550*5113495bSYour Name 		if (qdf_nbuf_is_rx_ipa_smmu_map(msdu)) {
1551*5113495bSYour Name 			/*
1552*5113495bSYour Name 			 * The nbuf has already been detached from its
1553*5113495bSYour Name 			 * hash_entry, and no parallel IPA context can access
1554*5113495bSYour Name 			 * it for SMMU map/unmap, so the flag is updated here
1555*5113495bSYour Name 			 * without holding the lock.
1556*5113495bSYour Name 			 *
1557*5113495bSYour Name 			 * The flag was intentionally not cleared in the
1558*5113495bSYour Name 			 * netbuf-pop context, htt_rx_hash_list_lookup() (where
1559*5113495bSYour Name 			 * the lock is held), so we can tell whether this nbuf
1560*5113495bSYour Name 			 * still needs an SMMU unmap or was never mapped at all.
1561*5113495bSYour Name 			 */
1562*5113495bSYour Name 			qdf_nbuf_set_rx_ipa_smmu_map(msdu, false);
1563*5113495bSYour Name 			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
1564*5113495bSYour Name 						 QDF_NBUF_CB_PADDR(msdu),
1565*5113495bSYour Name 						 HTT_RX_BUF_SIZE);
1566*5113495bSYour Name 			qdf_assert_always(
1567*5113495bSYour Name 				!cds_smmu_map_unmap(false, 1, &mem_map_table));
1568*5113495bSYour Name 		}
1569*5113495bSYour Name 
1570*5113495bSYour Name 		/*
1571*5113495bSYour Name 		 * Set the netbuf length to be the entire buffer length
1572*5113495bSYour Name 		 * initially, so the unmap will unmap the entire buffer.
1573*5113495bSYour Name 		 */
1574*5113495bSYour Name 		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
1575*5113495bSYour Name #ifdef DEBUG_DMA_DONE
1576*5113495bSYour Name 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
1577*5113495bSYour Name #else
1578*5113495bSYour Name 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
1579*5113495bSYour Name #endif
1580*5113495bSYour Name 		msdu_count--;
1581*5113495bSYour Name 
1582*5113495bSYour Name 		if (pdev->rx_buff_pool.netbufs_ring &&
1583*5113495bSYour Name 		    QDF_NBUF_CB_RX_PACKET_BUFF_POOL(msdu) &&
1584*5113495bSYour Name 		    !htt_rx_buff_pool_refill(pdev, msdu)) {
1585*5113495bSYour Name 			if (!msdu_count) {
1586*5113495bSYour Name 				if (!prev) {
1587*5113495bSYour Name 					*head_msdu = *tail_msdu = NULL;
1588*5113495bSYour Name 					ret = 1;
1589*5113495bSYour Name 					goto end;
1590*5113495bSYour Name 				}
1591*5113495bSYour Name 				*tail_msdu = prev;
1592*5113495bSYour Name 				qdf_nbuf_set_next(prev, NULL);
1593*5113495bSYour Name 				goto end;
1594*5113495bSYour Name 			} else {
1595*5113495bSYour Name 				/* get the next msdu */
1596*5113495bSYour Name 				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1597*5113495bSYour Name 				paddr = htt_rx_in_ord_paddr_get(msg_word);
1598*5113495bSYour Name 				next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1599*5113495bSYour Name 				if (qdf_unlikely(!next)) {
1600*5113495bSYour Name 					qdf_print("netbuf pop failed!");
1601*5113495bSYour Name 					*tail_msdu = NULL;
1602*5113495bSYour Name 					pdev->rx_ring.pop_fail_cnt++;
1603*5113495bSYour Name 					ret = 0;
1604*5113495bSYour Name 					goto end;
1605*5113495bSYour Name 				}
1606*5113495bSYour Name 				/* if this is not the first msdu, update the
1607*5113495bSYour Name 				 * next pointer of the preceding msdu
1608*5113495bSYour Name 				 */
1609*5113495bSYour Name 				if (prev) {
1610*5113495bSYour Name 					qdf_nbuf_set_next(prev, next);
1611*5113495bSYour Name 				} else {
1612*5113495bSYour Name 					/* if this is the first msdu, update
1613*5113495bSYour Name 					 * head pointer
1614*5113495bSYour Name 					 */
1615*5113495bSYour Name 					*head_msdu = next;
1616*5113495bSYour Name 				}
1617*5113495bSYour Name 				msdu = next;
1618*5113495bSYour Name 				continue;
1619*5113495bSYour Name 			}
1620*5113495bSYour Name 		}
1621*5113495bSYour Name 
1622*5113495bSYour Name 		/* cache consistency has been taken care of by qdf_nbuf_unmap */
1623*5113495bSYour Name 		rx_desc = htt_rx_desc(msdu);
1624*5113495bSYour Name 		htt_rx_extract_lro_info(msdu, rx_desc);
1625*5113495bSYour Name 
1626*5113495bSYour Name 		/* check if the msdu is last mpdu */
1627*5113495bSYour Name 		if (rx_desc->attention.last_mpdu)
1628*5113495bSYour Name 			timestamp_rx_desc = rx_desc;
1629*5113495bSYour Name 
1630*5113495bSYour Name 		/*
1631*5113495bSYour Name 		 * Make the netbuf's data pointer point to the payload rather
1632*5113495bSYour Name 		 * than the descriptor.
1633*5113495bSYour Name 		 */
1634*5113495bSYour Name 		qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
1635*5113495bSYour Name 
1636*5113495bSYour Name 		QDF_NBUF_CB_DP_TRACE_PRINT(msdu) = false;
1637*5113495bSYour Name 		qdf_dp_trace_set_track(msdu, QDF_RX);
1638*5113495bSYour Name 		QDF_NBUF_CB_TX_PACKET_TRACK(msdu) = QDF_NBUF_TX_PKT_DATA_TRACK;
1639*5113495bSYour Name 		QDF_NBUF_CB_RX_CTX_ID(msdu) = rx_ctx_id;
1640*5113495bSYour Name 
1641*5113495bSYour Name 		if (qdf_nbuf_is_ipv4_arp_pkt(msdu))
1642*5113495bSYour Name 			QDF_NBUF_CB_GET_PACKET_TYPE(msdu) =
1643*5113495bSYour Name 				QDF_NBUF_CB_PACKET_TYPE_ARP;
1644*5113495bSYour Name 
1645*5113495bSYour Name 		DPTRACE(qdf_dp_trace(msdu,
1646*5113495bSYour Name 				     QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD,
1647*5113495bSYour Name 				     QDF_TRACE_DEFAULT_PDEV_ID,
1648*5113495bSYour Name 				     qdf_nbuf_data_addr(msdu),
1649*5113495bSYour Name 				     sizeof(qdf_nbuf_data(msdu)), QDF_RX));
1650*5113495bSYour Name 
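		/*
		 * Trim the tail so the netbuf length matches the MSDU length
		 * reported in the in-order indication (the pktlen was set to
		 * the full HTT_RX_BUF_SIZE above for the unmap).
		 */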
1651*5113495bSYour Name 		qdf_nbuf_trim_tail(msdu,
1652*5113495bSYour Name 				   HTT_RX_BUF_SIZE -
1653*5113495bSYour Name 				   (RX_STD_DESC_SIZE +
1654*5113495bSYour Name 				    HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
1655*5113495bSYour Name 				    *(msg_word + NEXT_FIELD_OFFSET_IN32))));
1656*5113495bSYour Name #if defined(HELIUMPLUS_DEBUG)
1657*5113495bSYour Name 		ol_txrx_dump_pkt(msdu, 0, 64);
1658*5113495bSYour Name #endif
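		/*
		 * Copy the per-MSDU FW rx descriptor byte from the in-order
		 * indication into the HW rx descriptor's fw_desc field, which
		 * the discard/MIC-error checks below read.
		 */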
1659*5113495bSYour Name 		*((uint8_t *)&rx_desc->fw_desc.u.val) =
1660*5113495bSYour Name 			HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word +
1661*5113495bSYour Name 						NEXT_FIELD_OFFSET_IN32));
1662*5113495bSYour Name 
1663*5113495bSYour Name 		/* calling callback function for packet logging */
1664*5113495bSYour Name 		if (pdev->rx_pkt_dump_cb) {
1665*5113495bSYour Name 			if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
1666*5113495bSYour Name 					 !RX_DESC_DISCARD_IS_SET))
1667*5113495bSYour Name 				status = QDF_TX_RX_STATUS_FW_DISCARD;
1668*5113495bSYour Name 			pdev->rx_pkt_dump_cb(msdu, peer_id, status);
1669*5113495bSYour Name 		}
1670*5113495bSYour Name 
1671*5113495bSYour Name 		if (pdev->cfg.is_first_wakeup_packet)
1672*5113495bSYour Name 			htt_get_first_packet_after_wow_wakeup(
1673*5113495bSYour Name 				msg_word + NEXT_FIELD_OFFSET_IN32, msdu);
1674*5113495bSYour Name 
1675*5113495bSYour Name 		/* If the discard flag is set (SA is our own MAC), then
1676*5113495bSYour Name 		 * don't check for a MIC failure.
1677*5113495bSYour Name 		 */
1678*5113495bSYour Name 		if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
1679*5113495bSYour Name 				 !RX_DESC_DISCARD_IS_SET)) {
1680*5113495bSYour Name 			uint8_t tid =
1681*5113495bSYour Name 				HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
1682*5113495bSYour Name 					*(u_int32_t *)rx_ind_data);
1683*5113495bSYour Name 			ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
1684*5113495bSYour Name 						rx_desc, msdu);
1685*5113495bSYour Name 
1686*5113495bSYour Name 			htt_rx_desc_frame_free(pdev, msdu);
1687*5113495bSYour Name 			/* if this is the last msdu */
1688*5113495bSYour Name 			if (!msdu_count) {
1689*5113495bSYour Name 				/* if this is the only msdu */
1690*5113495bSYour Name 				if (!prev) {
1691*5113495bSYour Name 					*head_msdu = *tail_msdu = NULL;
1692*5113495bSYour Name 					ret = 0;
1693*5113495bSYour Name 					goto end;
1694*5113495bSYour Name 				}
1695*5113495bSYour Name 				*tail_msdu = prev;
1696*5113495bSYour Name 				qdf_nbuf_set_next(prev, NULL);
1697*5113495bSYour Name 				goto end;
1698*5113495bSYour Name 			} else { /* if this is not the last msdu */
1699*5113495bSYour Name 				/* get the next msdu */
1700*5113495bSYour Name 				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1701*5113495bSYour Name 				paddr = htt_rx_in_ord_paddr_get(msg_word);
1702*5113495bSYour Name 				next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1703*5113495bSYour Name 				if (qdf_unlikely(!next)) {
1704*5113495bSYour Name 					qdf_print("netbuf pop failed!");
1705*5113495bSYour Name 					*tail_msdu = NULL;
1706*5113495bSYour Name 					pdev->rx_ring.pop_fail_cnt++;
1707*5113495bSYour Name 					ret = 0;
1708*5113495bSYour Name 					goto end;
1709*5113495bSYour Name 				}
1710*5113495bSYour Name 
1711*5113495bSYour Name 				/* if this is not the first msdu, update the
1712*5113495bSYour Name 				 * next pointer of the preceding msdu
1713*5113495bSYour Name 				 */
1714*5113495bSYour Name 				if (prev) {
1715*5113495bSYour Name 					qdf_nbuf_set_next(prev, next);
1716*5113495bSYour Name 				} else {
1717*5113495bSYour Name 					/* if this is the first msdu, update the
1718*5113495bSYour Name 					 * head pointer
1719*5113495bSYour Name 					 */
1720*5113495bSYour Name 					*head_msdu = next;
1721*5113495bSYour Name 				}
1722*5113495bSYour Name 				msdu = next;
1723*5113495bSYour Name 				continue;
1724*5113495bSYour Name 			}
1725*5113495bSYour Name 		}
1726*5113495bSYour Name 		/* check if this is the last msdu */
1727*5113495bSYour Name 		if (msdu_count) {
1728*5113495bSYour Name 			msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1729*5113495bSYour Name 			paddr = htt_rx_in_ord_paddr_get(msg_word);
1730*5113495bSYour Name 			next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1731*5113495bSYour Name 			if (qdf_unlikely(!next)) {
1732*5113495bSYour Name 				qdf_print("netbuf pop failed!");
1733*5113495bSYour Name 				*tail_msdu = NULL;
1734*5113495bSYour Name 				pdev->rx_ring.pop_fail_cnt++;
1735*5113495bSYour Name 				ret = 0;
1736*5113495bSYour Name 				goto end;
1737*5113495bSYour Name 			}
1738*5113495bSYour Name 			qdf_nbuf_set_next(msdu, next);
1739*5113495bSYour Name 			prev = msdu;
1740*5113495bSYour Name 			msdu = next;
1741*5113495bSYour Name 		} else {
1742*5113495bSYour Name 			*tail_msdu = msdu;
1743*5113495bSYour Name 			qdf_nbuf_set_next(msdu, NULL);
1744*5113495bSYour Name 		}
1745*5113495bSYour Name 	}
1746*5113495bSYour Name 
1747*5113495bSYour Name 	htt_rx_tail_msdu_timestamp(rx_desc, timestamp_rx_desc);
1748*5113495bSYour Name 
1749*5113495bSYour Name end:
1750*5113495bSYour Name 	return ret;
1751*5113495bSYour Name }
1752*5113495bSYour Name 
1753*5113495bSYour Name static void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
1754*5113495bSYour Name 						  qdf_nbuf_t netbuf)
1755*5113495bSYour Name {
1756*5113495bSYour Name 	return (void *)htt_rx_desc(netbuf);
1757*5113495bSYour Name }
1758*5113495bSYour Name #else
1759*5113495bSYour Name 
1760*5113495bSYour Name static inline
1761*5113495bSYour Name int htt_rx_hash_init(struct htt_pdev_t *pdev)
1762*5113495bSYour Name {
1763*5113495bSYour Name 	return 0;
1764*5113495bSYour Name }
1765*5113495bSYour Name 
1766*5113495bSYour Name static inline
1767*5113495bSYour Name void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
1768*5113495bSYour Name {
1769*5113495bSYour Name }
1770*5113495bSYour Name 
1771*5113495bSYour Name static inline int
1772*5113495bSYour Name htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
1773*5113495bSYour Name 				qdf_nbuf_t rx_ind_msg,
1774*5113495bSYour Name 				qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1775*5113495bSYour Name 				uint32_t *replenish_cnt)
1776*5113495bSYour Name {
1777*5113495bSYour Name 	return 0;
1778*5113495bSYour Name }
1779*5113495bSYour Name 
1780*5113495bSYour Name static inline
1781*5113495bSYour Name void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
1782*5113495bSYour Name 					   qdf_nbuf_t netbuf)
1783*5113495bSYour Name {
1784*5113495bSYour Name 	return NULL;
1785*5113495bSYour Name }
1786*5113495bSYour Name #endif
1787*5113495bSYour Name 
1788*5113495bSYour Name #ifdef WLAN_PARTIAL_REORDER_OFFLOAD
1789*5113495bSYour Name 
1790*5113495bSYour Name /* AR9888v1 WORKAROUND for EV#112367 */
1791*5113495bSYour Name /* FIX THIS - remove this WAR when the bug is fixed */
1792*5113495bSYour Name #define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR
1793*5113495bSYour Name 
1794*5113495bSYour Name static int
1795*5113495bSYour Name htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
1796*5113495bSYour Name 		    qdf_nbuf_t rx_ind_msg,
1797*5113495bSYour Name 		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1798*5113495bSYour Name 		    uint32_t *msdu_count)
1799*5113495bSYour Name {
1800*5113495bSYour Name 	int msdu_len, msdu_chaining = 0;
1801*5113495bSYour Name 	qdf_nbuf_t msdu;
1802*5113495bSYour Name 	struct htt_host_rx_desc_base *rx_desc;
1803*5113495bSYour Name 	uint8_t *rx_ind_data;
1804*5113495bSYour Name 	uint32_t *msg_word, num_msdu_bytes;
1805*5113495bSYour Name 	qdf_dma_addr_t rx_desc_paddr;
1806*5113495bSYour Name 	enum htt_t2h_msg_type msg_type;
1807*5113495bSYour Name 	uint8_t pad_bytes = 0;
1808*5113495bSYour Name 
1809*5113495bSYour Name 	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
1810*5113495bSYour Name 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
1811*5113495bSYour Name 	msg_word = (uint32_t *)rx_ind_data;
1812*5113495bSYour Name 
1813*5113495bSYour Name 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
1814*5113495bSYour Name 
1815*5113495bSYour Name 	if (qdf_unlikely(msg_type == HTT_T2H_MSG_TYPE_RX_FRAG_IND)) {
1816*5113495bSYour Name 		num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
1817*5113495bSYour Name 			*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
1818*5113495bSYour Name 	} else {
1819*5113495bSYour Name 		num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
1820*5113495bSYour Name 			*(msg_word
1821*5113495bSYour Name 			  + HTT_RX_IND_HDR_PREFIX_SIZE32
1822*5113495bSYour Name 			  + HTT_RX_PPDU_DESC_SIZE32));
1823*5113495bSYour Name 	}
1824*5113495bSYour Name 	msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
1825*5113495bSYour Name 	while (1) {
1826*5113495bSYour Name 		int last_msdu, msdu_len_invalid, msdu_chained;
1827*5113495bSYour Name 		int byte_offset;
1828*5113495bSYour Name 		qdf_nbuf_t next;
1829*5113495bSYour Name 
1830*5113495bSYour Name 		/*
1831*5113495bSYour Name 		 * Set the netbuf length to be the entire buffer length
1832*5113495bSYour Name 		 * initially, so the unmap will unmap the entire buffer.
1833*5113495bSYour Name 		 */
1834*5113495bSYour Name 		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
1835*5113495bSYour Name #ifdef DEBUG_DMA_DONE
1836*5113495bSYour Name 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
1837*5113495bSYour Name #else
1838*5113495bSYour Name 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
1839*5113495bSYour Name #endif
1840*5113495bSYour Name 
1841*5113495bSYour Name 		/* cache consistency has been taken care of by qdf_nbuf_unmap */
1842*5113495bSYour Name 
1843*5113495bSYour Name 		/*
1844*5113495bSYour Name 		 * Now read the rx descriptor.
1845*5113495bSYour Name 		 * Set the length to the appropriate value.
1846*5113495bSYour Name 		 * Check if this MSDU completes a MPDU.
1847*5113495bSYour Name 		 */
1848*5113495bSYour Name 		rx_desc = htt_rx_desc(msdu);
1849*5113495bSYour Name #if defined(HELIUMPLUS)
1850*5113495bSYour Name 		if (HTT_WIFI_IP(pdev, 2, 0))
1851*5113495bSYour Name 			pad_bytes = rx_desc->msdu_end.l3_header_padding;
1852*5113495bSYour Name #endif /* defined(HELIUMPLUS) */
1853*5113495bSYour Name 
1854*5113495bSYour Name 		/*
1855*5113495bSYour Name 		 * Save PADDR of descriptor and make the netbuf's data pointer
1856*5113495bSYour Name 		 * point to the payload rather than the descriptor.
1857*5113495bSYour Name 		 */
1858*5113495bSYour Name 		rx_desc_paddr = QDF_NBUF_CB_PADDR(msdu);
1859*5113495bSYour Name 		qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION +
1860*5113495bSYour Name 					 pad_bytes);
1861*5113495bSYour Name 
1862*5113495bSYour Name 		/*
1863*5113495bSYour Name 		 * Sanity check - confirm the HW is finished filling in
1864*5113495bSYour Name 		 * the rx data.
1865*5113495bSYour Name 		 * If the HW and SW are working correctly, then it's guaranteed
1866*5113495bSYour Name 		 * that the HW's MAC DMA is done before this point in the SW.
1867*5113495bSYour Name 		 * To prevent the case that we handle a stale Rx descriptor,
1868*5113495bSYour Name 		 * just assert for now until we have a way to recover.
1869*5113495bSYour Name 		 */
1870*5113495bSYour Name 
1871*5113495bSYour Name #ifdef DEBUG_DMA_DONE
1872*5113495bSYour Name 		if (qdf_unlikely(!((*(uint32_t *)&rx_desc->attention)
1873*5113495bSYour Name 				   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
1874*5113495bSYour Name 			int dbg_iter = MAX_DONE_BIT_CHECK_ITER;
1875*5113495bSYour Name 
1876*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
1877*5113495bSYour Name 				  "malformed frame");
1878*5113495bSYour Name 
1879*5113495bSYour Name 			while (dbg_iter &&
1880*5113495bSYour Name 			       (!((*(uint32_t *)&rx_desc->attention) &
1881*5113495bSYour Name 				  RX_ATTENTION_0_MSDU_DONE_MASK))) {
1882*5113495bSYour Name 				qdf_mdelay(1);
1883*5113495bSYour Name 				qdf_mem_dma_sync_single_for_cpu(
1884*5113495bSYour Name 					pdev->osdev,
1885*5113495bSYour Name 					rx_desc_paddr,
1886*5113495bSYour Name 					HTT_RX_STD_DESC_RESERVATION,
1887*5113495bSYour Name 					DMA_FROM_DEVICE);
1888*5113495bSYour Name 
1889*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_HTT,
1890*5113495bSYour Name 					  QDF_TRACE_LEVEL_INFO,
1891*5113495bSYour Name 					  "debug iter %d success %d", dbg_iter,
1892*5113495bSYour Name 					  pdev->rx_ring.dbg_sync_success);
1893*5113495bSYour Name 
1894*5113495bSYour Name 				dbg_iter--;
1895*5113495bSYour Name 			}
1896*5113495bSYour Name 
1897*5113495bSYour Name 			if (qdf_unlikely(!((*(uint32_t *)&rx_desc->attention)
1898*5113495bSYour Name 					   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
1899*5113495bSYour Name #ifdef HTT_RX_RESTORE
1900*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_HTT,
1901*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1902*5113495bSYour Name 					  "RX done bit error detected!");
1903*5113495bSYour Name 
1904*5113495bSYour Name 				qdf_nbuf_set_next(msdu, NULL);
1905*5113495bSYour Name 				*tail_msdu = msdu;
1906*5113495bSYour Name 				pdev->rx_ring.rx_reset = 1;
1907*5113495bSYour Name 				return msdu_chaining;
1908*5113495bSYour Name #else
1909*5113495bSYour Name 				wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
1910*5113495bSYour Name 						    0, GEN_CMD);
1911*5113495bSYour Name 				HTT_ASSERT_ALWAYS(0);
1912*5113495bSYour Name #endif
1913*5113495bSYour Name 			}
1914*5113495bSYour Name 			pdev->rx_ring.dbg_sync_success++;
1915*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
1916*5113495bSYour Name 				  "debug iter %d success %d", dbg_iter,
1917*5113495bSYour Name 				  pdev->rx_ring.dbg_sync_success);
1918*5113495bSYour Name 		}
1919*5113495bSYour Name #else
1920*5113495bSYour Name 		HTT_ASSERT_ALWAYS((*(uint32_t *)&rx_desc->attention) &
1921*5113495bSYour Name 				  RX_ATTENTION_0_MSDU_DONE_MASK);
1922*5113495bSYour Name #endif
1923*5113495bSYour Name 		/*
1924*5113495bSYour Name 		 * Copy the FW rx descriptor for this MSDU from the rx
1925*5113495bSYour Name 		 * indication message into the MSDU's netbuf.
1926*5113495bSYour Name 		 * HL uses the same rx indication message definition as LL, and
1927*5113495bSYour Name 		 * simply appends new info (fields from the HW rx desc, and the
1928*5113495bSYour Name 		 * MSDU payload itself).
1929*5113495bSYour Name 		 * So, the offset into the rx indication message only has to
1930*5113495bSYour Name 		 * account for the standard offset of the per-MSDU FW rx
1931*5113495bSYour Name 		 * desc info within the message, and how many bytes of the
1932*5113495bSYour Name 		 * per-MSDU FW rx desc info have already been consumed.
1933*5113495bSYour Name 		 * (And the endianness of the host,
1934*5113495bSYour Name 		 * since for a big-endian host, the rx ind message contents,
1935*5113495bSYour Name 		 * including the per-MSDU rx desc bytes, were byteswapped during
1936*5113495bSYour Name 		 * upload.)
1937*5113495bSYour Name 		 */
1938*5113495bSYour Name 		if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
1939*5113495bSYour Name 			if (qdf_unlikely
1940*5113495bSYour Name 				    (msg_type == HTT_T2H_MSG_TYPE_RX_FRAG_IND))
1941*5113495bSYour Name 				byte_offset =
1942*5113495bSYour Name 					HTT_ENDIAN_BYTE_IDX_SWAP
1943*5113495bSYour Name 					(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
1944*5113495bSYour Name 			else
1945*5113495bSYour Name 				byte_offset =
1946*5113495bSYour Name 					HTT_ENDIAN_BYTE_IDX_SWAP
1947*5113495bSYour Name 					(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
1948*5113495bSYour Name 						pdev->rx_ind_msdu_byte_idx);
1949*5113495bSYour Name 
1950*5113495bSYour Name 			*((uint8_t *)&rx_desc->fw_desc.u.val) =
1951*5113495bSYour Name 				rx_ind_data[byte_offset];
1952*5113495bSYour Name 			/*
1953*5113495bSYour Name 			 * The target is expected to only provide the basic
1954*5113495bSYour Name 			 * per-MSDU rx descriptors.  Just to be sure,
1955*5113495bSYour Name 			 * verify that the target has not attached
1956*5113495bSYour Name 			 * extension data (e.g. LRO flow ID).
1957*5113495bSYour Name 			 */
1958*5113495bSYour Name 			/*
1959*5113495bSYour Name 			 * The assertion below currently doesn't work for
1960*5113495bSYour Name 			 * RX_FRAG_IND messages, since their format differs
1961*5113495bSYour Name 			 * from the RX_IND format (no FW rx PPDU desc in
1962*5113495bSYour Name 			 * the current RX_FRAG_IND message).
1963*5113495bSYour Name 			 * If the RX_FRAG_IND message format is updated to match
1964*5113495bSYour Name 			 * the RX_IND message format, then the following
1965*5113495bSYour Name 			 * assertion can be restored.
1966*5113495bSYour Name 			 */
1967*5113495bSYour Name 			/*
1968*5113495bSYour Name 			 * qdf_assert((rx_ind_data[byte_offset] &
1969*5113495bSYour Name 			 * FW_RX_DESC_EXT_M) == 0);
1970*5113495bSYour Name 			 */
1971*5113495bSYour Name 			pdev->rx_ind_msdu_byte_idx += 1;
1972*5113495bSYour Name 			/* or more, if there's ext data */
1973*5113495bSYour Name 		} else {
1974*5113495bSYour Name 			/*
1975*5113495bSYour Name 			 * When an oversized A-MSDU occurs, the FW loses some
1976*5113495bSYour Name 			 * of the MSDU status - in that case the FW provides
1977*5113495bSYour Name 			 * fewer descriptors than there are MSDUs inside this
1978*5113495bSYour Name 			 * MPDU.
1979*5113495bSYour Name 			 * Mark the missing FW descriptors as zero so the frames
1980*5113495bSYour Name 			 * are still delivered to the upper stack if the MPDU
1981*5113495bSYour Name 			 * has no CRC error.
1982*5113495bSYour Name 			 *
1983*5113495bSYour Name 			 * FIX THIS - the FW descriptors actually cover the MSDUs
1984*5113495bSYour Name 			 * at the end of this A-MSDU instead of the beginning.
1984*5113495bSYour Name 			 */
1985*5113495bSYour Name 			*((uint8_t *)&rx_desc->fw_desc.u.val) = 0;
1986*5113495bSYour Name 		}
1987*5113495bSYour Name 
1988*5113495bSYour Name 		/*
1989*5113495bSYour Name 		 *  TCP/UDP checksum offload support
1990*5113495bSYour Name 		 */
1991*5113495bSYour Name 		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
1992*5113495bSYour Name 
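		/*
		 * Extract the length-error flag, the count of additional
		 * ("chained") rx buffers used by this MSDU, and the MSDU
		 * length from the HW rx descriptor.
		 */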
1993*5113495bSYour Name 		msdu_len_invalid = (*(uint32_t *)&rx_desc->attention) &
1994*5113495bSYour Name 				   RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
1995*5113495bSYour Name 		msdu_chained = (((*(uint32_t *)&rx_desc->frag_info) &
1996*5113495bSYour Name 				 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
1997*5113495bSYour Name 				RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
1998*5113495bSYour Name 		msdu_len =
1999*5113495bSYour Name 			((*((uint32_t *)&rx_desc->msdu_start)) &
2000*5113495bSYour Name 			 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
2001*5113495bSYour Name 			RX_MSDU_START_0_MSDU_LENGTH_LSB;
2002*5113495bSYour Name 
2003*5113495bSYour Name 		do {
2004*5113495bSYour Name 			if (!msdu_len_invalid && !msdu_chained) {
2005*5113495bSYour Name #if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
2006*5113495bSYour Name 				if (msdu_len > 0x3000)
2007*5113495bSYour Name 					break;
2008*5113495bSYour Name #endif
2009*5113495bSYour Name 				qdf_nbuf_trim_tail(msdu,
2010*5113495bSYour Name 						   HTT_RX_BUF_SIZE -
2011*5113495bSYour Name 						   (RX_STD_DESC_SIZE +
2012*5113495bSYour Name 						    msdu_len));
2013*5113495bSYour Name 			}
2014*5113495bSYour Name 		} while (0);
2015*5113495bSYour Name 
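		/*
		 * An MSDU larger than one rx buffer spills over into further
		 * ring entries; pop each continuation buffer, link it into
		 * the chain, and trim the final buffer to the remaining
		 * length.
		 */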
2016*5113495bSYour Name 		while (msdu_chained--) {
2017*5113495bSYour Name 			next = htt_rx_netbuf_pop(pdev);
2018*5113495bSYour Name 			qdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
2019*5113495bSYour Name 			msdu_len -= HTT_RX_BUF_SIZE;
2020*5113495bSYour Name 			qdf_nbuf_set_next(msdu, next);
2021*5113495bSYour Name 			msdu = next;
2022*5113495bSYour Name 			msdu_chaining = 1;
2023*5113495bSYour Name 
2024*5113495bSYour Name 			if (msdu_chained == 0) {
2025*5113495bSYour Name 				/* Trim the last one to the correct size -
2026*5113495bSYour Name 				 * accounting for inconsistent HW lengths
2027*5113495bSYour Name 				 * causing length overflows and underflows
2028*5113495bSYour Name 				 */
2029*5113495bSYour Name 				if (((unsigned int)msdu_len) >
2030*5113495bSYour Name 				    ((unsigned int)
2031*5113495bSYour Name 				     (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
2032*5113495bSYour Name 					msdu_len =
2033*5113495bSYour Name 						(HTT_RX_BUF_SIZE -
2034*5113495bSYour Name 						 RX_STD_DESC_SIZE);
2035*5113495bSYour Name 				}
2036*5113495bSYour Name 
2037*5113495bSYour Name 				qdf_nbuf_trim_tail(next,
2038*5113495bSYour Name 						   HTT_RX_BUF_SIZE -
2039*5113495bSYour Name 						   (RX_STD_DESC_SIZE +
2040*5113495bSYour Name 						    msdu_len));
2041*5113495bSYour Name 			}
2042*5113495bSYour Name 		}
2043*5113495bSYour Name 
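		/*
		 * The msdu_end descriptor carries a "last MSDU of the MPDU"
		 * flag, which terminates this per-MPDU buffer chain.
		 */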
2044*5113495bSYour Name 		last_msdu =
2045*5113495bSYour Name 			((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
2046*5113495bSYour Name 			 RX_MSDU_END_4_LAST_MSDU_MASK) >>
2047*5113495bSYour Name 			RX_MSDU_END_4_LAST_MSDU_LSB;
2048*5113495bSYour Name 
2049*5113495bSYour Name 		if (last_msdu) {
2050*5113495bSYour Name 			qdf_nbuf_set_next(msdu, NULL);
2051*5113495bSYour Name 			break;
2052*5113495bSYour Name 		}
2053*5113495bSYour Name 
2054*5113495bSYour Name 		next = htt_rx_netbuf_pop(pdev);
2055*5113495bSYour Name 		qdf_nbuf_set_next(msdu, next);
2056*5113495bSYour Name 		msdu = next;
2057*5113495bSYour Name 	}
2058*5113495bSYour Name 	*tail_msdu = msdu;
2059*5113495bSYour Name 
2060*5113495bSYour Name 	/*
2061*5113495bSYour Name 	 * Don't refill the ring yet.
2062*5113495bSYour Name 	 * First, the elements popped here are still in use - it is
2063*5113495bSYour Name 	 * not safe to overwrite them until the matching call to
2064*5113495bSYour Name 	 * mpdu_desc_list_next.
2065*5113495bSYour Name 	 * Second, for efficiency it is preferable to refill the rx ring
2066*5113495bSYour Name 	 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
2067*5113495bSYour Name 	 * rather than one MPDU's worth of rx buffers (something like 3 buffers).
2068*5113495bSYour Name 	 * Consequently, we'll rely on the txrx SW to tell us when it is done
2069*5113495bSYour Name 	 * pulling all the PPDU's rx buffers out of the rx ring, and then
2070*5113495bSYour Name 	 * refill it just once.
2071*5113495bSYour Name 	 */
2072*5113495bSYour Name 	return msdu_chaining;
2073*5113495bSYour Name }
2074*5113495bSYour Name 
2075*5113495bSYour Name static
2076*5113495bSYour Name void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
2077*5113495bSYour Name {
2078*5113495bSYour Name 	int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
2079*5113495bSYour Name 	qdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
2080*5113495bSYour Name 
2081*5113495bSYour Name 	pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
2082*5113495bSYour Name 	return (void *)htt_rx_desc(netbuf);
2083*5113495bSYour Name }
2084*5113495bSYour Name 
2085*5113495bSYour Name #else
2086*5113495bSYour Name 
2087*5113495bSYour Name static inline int
2088*5113495bSYour Name htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
2089*5113495bSYour Name 		    qdf_nbuf_t rx_ind_msg,
2090*5113495bSYour Name 		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
2091*5113495bSYour Name 		    uint32_t *msdu_count)
2092*5113495bSYour Name {
2093*5113495bSYour Name 	return 0;
2094*5113495bSYour Name }
2095*5113495bSYour Name 
2096*5113495bSYour Name static inline
2097*5113495bSYour Name void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
2098*5113495bSYour Name {
2099*5113495bSYour Name 	return NULL;
2100*5113495bSYour Name }
2101*5113495bSYour Name #endif
2102*5113495bSYour Name 
2103*5113495bSYour Name /**
2104*5113495bSYour Name  * htt_rx_fill_ring_count() - replenish rx msdu buffer
2105*5113495bSYour Name  * @pdev: Handle (pointer) to HTT pdev.
2106*5113495bSYour Name  *
2107*5113495bSYour Name  * This function will replenish the rx buffer to the max number
2108*5113495bSYour Name  * that can be kept in the ring
2109*5113495bSYour Name  *
2110*5113495bSYour Name  * Return: None
2111*5113495bSYour Name  */
2112*5113495bSYour Name void htt_rx_fill_ring_count(htt_pdev_handle pdev)
2113*5113495bSYour Name {
2114*5113495bSYour Name 	int num_to_fill;
2115*5113495bSYour Name 
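	/*
	 * fill_cnt tracks how many buffers are currently posted to the
	 * ring, so the difference is what is missing to reach fill_level.
	 */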
2116*5113495bSYour Name 	num_to_fill = pdev->rx_ring.fill_level -
2117*5113495bSYour Name 		qdf_atomic_read(&pdev->rx_ring.fill_cnt);
2118*5113495bSYour Name 	htt_rx_ring_fill_n(pdev, num_to_fill /* okay if <= 0 */);
2119*5113495bSYour Name }
2120*5113495bSYour Name 
2121*5113495bSYour Name int htt_rx_attach(struct htt_pdev_t *pdev)
2122*5113495bSYour Name {
2123*5113495bSYour Name 	qdf_dma_addr_t paddr;
2124*5113495bSYour Name 	uint32_t ring_elem_size = sizeof(target_paddr_t);
2125*5113495bSYour Name 
2126*5113495bSYour Name 	pdev->rx_ring.size = htt_rx_ring_size(pdev);
2127*5113495bSYour Name 	HTT_ASSERT2(QDF_IS_PWR2(pdev->rx_ring.size));
2128*5113495bSYour Name 	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
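	/*
	 * The ring size is a power of 2, so ring indices can wrap with a
	 * cheap "idx &= size_mask" instead of a modulo operation.
	 */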
2129*5113495bSYour Name 
2130*5113495bSYour Name 	/*
2131*5113495bSYour Name 	 * Set the initial value for the level to which the rx ring
2132*5113495bSYour Name 	 * should be filled, based on the max throughput and the worst
2133*5113495bSYour Name 	 * likely latency for the host to fill the rx ring.
2134*5113495bSYour Name 	 * In theory, this fill level can be dynamically adjusted from
2135*5113495bSYour Name 	 * the initial value set here to reflect the actual host latency
2136*5113495bSYour Name 	 * rather than a conservative assumption.
2137*5113495bSYour Name 	 */
2138*5113495bSYour Name 	pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
2139*5113495bSYour Name 
2140*5113495bSYour Name 	if (pdev->cfg.is_full_reorder_offload) {
2141*5113495bSYour Name 		if (htt_rx_hash_init(pdev))
2142*5113495bSYour Name 			goto fail1;
2143*5113495bSYour Name 
2144*5113495bSYour Name 		/* allocate the target index */
2145*5113495bSYour Name 		pdev->rx_ring.target_idx.vaddr =
2146*5113495bSYour Name 			 qdf_mem_alloc_consistent(pdev->osdev, pdev->osdev->dev,
2147*5113495bSYour Name 						  sizeof(uint32_t), &paddr);
2148*5113495bSYour Name 
2149*5113495bSYour Name 		if (!pdev->rx_ring.target_idx.vaddr)
2150*5113495bSYour Name 			goto fail2;
2151*5113495bSYour Name 
2152*5113495bSYour Name 		pdev->rx_ring.target_idx.paddr = paddr;
2153*5113495bSYour Name 		*pdev->rx_ring.target_idx.vaddr = 0;
2154*5113495bSYour Name 	} else {
2155*5113495bSYour Name 		pdev->rx_ring.buf.netbufs_ring =
2156*5113495bSYour Name 			qdf_mem_malloc(pdev->rx_ring.size * sizeof(qdf_nbuf_t));
2157*5113495bSYour Name 		if (!pdev->rx_ring.buf.netbufs_ring)
2158*5113495bSYour Name 			goto fail1;
2159*5113495bSYour Name 
2160*5113495bSYour Name 		pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
2161*5113495bSYour Name 		pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
2162*5113495bSYour Name 	}
2163*5113495bSYour Name 
2164*5113495bSYour Name 	pdev->rx_ring.buf.paddrs_ring =
2165*5113495bSYour Name 		qdf_mem_alloc_consistent(
2166*5113495bSYour Name 			pdev->osdev, pdev->osdev->dev,
2167*5113495bSYour Name 			 pdev->rx_ring.size * ring_elem_size,
2168*5113495bSYour Name 			 &paddr);
2169*5113495bSYour Name 	if (!pdev->rx_ring.buf.paddrs_ring)
2170*5113495bSYour Name 		goto fail3;
2171*5113495bSYour Name 
2172*5113495bSYour Name 	pdev->rx_ring.base_paddr = paddr;
2173*5113495bSYour Name 	pdev->rx_ring.alloc_idx.vaddr =
2174*5113495bSYour Name 		 qdf_mem_alloc_consistent(
2175*5113495bSYour Name 			pdev->osdev, pdev->osdev->dev,
2176*5113495bSYour Name 			 sizeof(uint32_t), &paddr);
2177*5113495bSYour Name 
2178*5113495bSYour Name 	if (!pdev->rx_ring.alloc_idx.vaddr)
2179*5113495bSYour Name 		goto fail4;
2180*5113495bSYour Name 
2181*5113495bSYour Name 	pdev->rx_ring.alloc_idx.paddr = paddr;
2182*5113495bSYour Name 	*pdev->rx_ring.alloc_idx.vaddr = 0;
2183*5113495bSYour Name 
2184*5113495bSYour Name 	if (htt_rx_buff_pool_init(pdev))
2185*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
2186*5113495bSYour Name 			  "HTT: pre-allocated packet pool alloc failed");
2187*5113495bSYour Name 
2188*5113495bSYour Name 	/*
2189*5113495bSYour Name 	 * Initialize the Rx refill reference counter to be one so that
2190*5113495bSYour Name 	 * only one thread is allowed to refill the Rx ring.
2191*5113495bSYour Name 	 */
2192*5113495bSYour Name 	qdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
2193*5113495bSYour Name 	qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
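	/*
	 * qdf_atomic_init() starts the counter at 0; the increment above
	 * brings it to 1, matching the single-refiller scheme noted above.
	 */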
2194*5113495bSYour Name 
2195*5113495bSYour Name 	/* Initialize the refill_lock and debt (for rx-parallelization) */
2196*5113495bSYour Name 	qdf_spinlock_create(&pdev->rx_ring.refill_lock);
2197*5113495bSYour Name 	qdf_atomic_init(&pdev->rx_ring.refill_debt);
2198*5113495bSYour Name 
2199*5113495bSYour Name 	/* Initialize the Rx refill retry timer */
2200*5113495bSYour Name 	qdf_timer_init(pdev->osdev,
2201*5113495bSYour Name 		       &pdev->rx_ring.refill_retry_timer,
2202*5113495bSYour Name 		       htt_rx_ring_refill_retry, (void *)pdev,
2203*5113495bSYour Name 		       QDF_TIMER_TYPE_SW);
2204*5113495bSYour Name 
2205*5113495bSYour Name 	qdf_atomic_init(&pdev->rx_ring.fill_cnt);
2206*5113495bSYour Name 	pdev->rx_ring.pop_fail_cnt = 0;
2207*5113495bSYour Name #ifdef DEBUG_DMA_DONE
2208*5113495bSYour Name 	pdev->rx_ring.dbg_ring_idx = 0;
2209*5113495bSYour Name 	pdev->rx_ring.dbg_refill_cnt = 0;
2210*5113495bSYour Name 	pdev->rx_ring.dbg_sync_success = 0;
2211*5113495bSYour Name #endif
2212*5113495bSYour Name #ifdef HTT_RX_RESTORE
2213*5113495bSYour Name 	pdev->rx_ring.rx_reset = 0;
2214*5113495bSYour Name 	pdev->rx_ring.htt_rx_restore = 0;
2215*5113495bSYour Name #endif
2216*5113495bSYour Name 	htt_rx_dbg_rxbuf_init(pdev);
2217*5113495bSYour Name 	htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
2218*5113495bSYour Name 
2219*5113495bSYour Name 	if (pdev->cfg.is_full_reorder_offload) {
2220*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO_LOW,
2221*5113495bSYour Name 			  "HTT: full reorder offload enabled");
2222*5113495bSYour Name 		htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2223*5113495bSYour Name 		htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2224*5113495bSYour Name 		htt_rx_mpdu_desc_list_next =
2225*5113495bSYour Name 			 htt_rx_in_ord_mpdu_desc_list_next_ll;
2226*5113495bSYour Name 	} else {
2227*5113495bSYour Name 		htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
2228*5113495bSYour Name 		htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
2229*5113495bSYour Name 		htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
2230*5113495bSYour Name 	}
2231*5113495bSYour Name 
2232*5113495bSYour Name 	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
2233*5113495bSYour Name 		htt_rx_amsdu_pop = htt_rx_mon_amsdu_rx_in_order_pop_ll;
2234*5113495bSYour Name 
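	/* Plug the LL implementations into the generic HTT rx accessor hooks */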
2235*5113495bSYour Name 	htt_rx_offload_msdu_cnt = htt_rx_offload_msdu_cnt_ll;
2236*5113495bSYour Name 	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
2237*5113495bSYour Name 	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
2238*5113495bSYour Name 	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
2239*5113495bSYour Name 	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
2240*5113495bSYour Name 	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
2241*5113495bSYour Name 	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
2242*5113495bSYour Name 	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
2243*5113495bSYour Name 	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
2244*5113495bSYour Name 	htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
2245*5113495bSYour Name 	htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
2246*5113495bSYour Name 	htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
2247*5113495bSYour Name 	htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
2248*5113495bSYour Name 	htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
2249*5113495bSYour Name 	htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
2250*5113495bSYour Name 	htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
2251*5113495bSYour Name 
2252*5113495bSYour Name 	return 0;               /* success */
2253*5113495bSYour Name 
2254*5113495bSYour Name fail4:
2255*5113495bSYour Name 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2256*5113495bSYour Name 				pdev->rx_ring.size * sizeof(target_paddr_t),
2257*5113495bSYour Name 				pdev->rx_ring.buf.paddrs_ring,
2258*5113495bSYour Name 				pdev->rx_ring.base_paddr,
2259*5113495bSYour Name 				qdf_get_dma_mem_context((&pdev->rx_ring.buf),
2260*5113495bSYour Name 							memctx));
2261*5113495bSYour Name 
2262*5113495bSYour Name fail3:
2263*5113495bSYour Name 	if (pdev->cfg.is_full_reorder_offload)
2264*5113495bSYour Name 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2265*5113495bSYour Name 					sizeof(uint32_t),
2266*5113495bSYour Name 					pdev->rx_ring.target_idx.vaddr,
2267*5113495bSYour Name 					pdev->rx_ring.target_idx.paddr,
2268*5113495bSYour Name 					qdf_get_dma_mem_context((&pdev->
2269*5113495bSYour Name 								 rx_ring.
2270*5113495bSYour Name 								 target_idx),
2271*5113495bSYour Name 								 memctx));
2272*5113495bSYour Name 	else
2273*5113495bSYour Name 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
2274*5113495bSYour Name 
2275*5113495bSYour Name fail2:
2276*5113495bSYour Name 	if (pdev->cfg.is_full_reorder_offload)
2277*5113495bSYour Name 		htt_rx_hash_deinit(pdev);
2278*5113495bSYour Name 
2279*5113495bSYour Name fail1:
2280*5113495bSYour Name 	return 1;               /* failure */
2281*5113495bSYour Name }
2282*5113495bSYour Name 
2283*5113495bSYour Name void htt_rx_detach(struct htt_pdev_t *pdev)
2284*5113495bSYour Name {
2285*5113495bSYour Name 	bool ipa_smmu = false;
2286*5113495bSYour Name 	qdf_nbuf_t nbuf;
2287*5113495bSYour Name 
2288*5113495bSYour Name 	qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
2289*5113495bSYour Name 	qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
2290*5113495bSYour Name 	htt_rx_dbg_rxbuf_deinit(pdev);
2291*5113495bSYour Name 
2292*5113495bSYour Name 	ipa_smmu = htt_rx_ring_smmu_mapped(pdev);
2293*5113495bSYour Name 
2294*5113495bSYour Name 	if (pdev->cfg.is_full_reorder_offload) {
2295*5113495bSYour Name 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2296*5113495bSYour Name 					sizeof(uint32_t),
2297*5113495bSYour Name 					pdev->rx_ring.target_idx.vaddr,
2298*5113495bSYour Name 					pdev->rx_ring.target_idx.paddr,
2299*5113495bSYour Name 					qdf_get_dma_mem_context((&pdev->
2300*5113495bSYour Name 								 rx_ring.
2301*5113495bSYour Name 								 target_idx),
2302*5113495bSYour Name 								 memctx));
2303*5113495bSYour Name 		htt_rx_hash_deinit(pdev);
2304*5113495bSYour Name 	} else {
2305*5113495bSYour Name 		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
2306*5113495bSYour Name 		qdf_mem_info_t mem_map_table = {0};
2307*5113495bSYour Name 
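		/*
		 * Walk from the SW read index up to the alloc index and
		 * release every buffer still owned by the ring: undo the
		 * IPA SMMU mapping (if any) and the DMA mapping, then
		 * free the nbuf.
		 */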
2308*5113495bSYour Name 		while (sw_rd_idx != *pdev->rx_ring.alloc_idx.vaddr) {
2309*5113495bSYour Name 			nbuf = pdev->rx_ring.buf.netbufs_ring[sw_rd_idx];
2310*5113495bSYour Name 			if (ipa_smmu) {
2311*5113495bSYour Name 				if (qdf_unlikely(
2312*5113495bSYour Name 					!qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
2313*5113495bSYour Name 					qdf_err("smmu not mapped, nbuf: %pK",
2314*5113495bSYour Name 						nbuf);
2315*5113495bSYour Name 					qdf_assert_always(0);
2316*5113495bSYour Name 				}
2317*5113495bSYour Name 				qdf_nbuf_set_rx_ipa_smmu_map(nbuf, false);
2318*5113495bSYour Name 				qdf_update_mem_map_table(pdev->osdev,
2319*5113495bSYour Name 					&mem_map_table,
2320*5113495bSYour Name 					QDF_NBUF_CB_PADDR(nbuf),
2321*5113495bSYour Name 					HTT_RX_BUF_SIZE);
2322*5113495bSYour Name 				qdf_assert_always(
2323*5113495bSYour Name 					!cds_smmu_map_unmap(false, 1,
2324*5113495bSYour Name 							    &mem_map_table));
2325*5113495bSYour Name 			}
2326*5113495bSYour Name #ifdef DEBUG_DMA_DONE
2327*5113495bSYour Name 			qdf_nbuf_unmap(pdev->osdev, nbuf,
2328*5113495bSYour Name 				       QDF_DMA_BIDIRECTIONAL);
2329*5113495bSYour Name #else
2330*5113495bSYour Name 			qdf_nbuf_unmap(pdev->osdev, nbuf,
2331*5113495bSYour Name 				       QDF_DMA_FROM_DEVICE);
2332*5113495bSYour Name #endif
2333*5113495bSYour Name 			qdf_nbuf_free(nbuf);
2334*5113495bSYour Name 			sw_rd_idx++;
2335*5113495bSYour Name 			sw_rd_idx &= pdev->rx_ring.size_mask;
2336*5113495bSYour Name 		}
2337*5113495bSYour Name 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
2338*5113495bSYour Name 	}
2339*5113495bSYour Name 
2340*5113495bSYour Name 	htt_rx_buff_pool_deinit(pdev);
2341*5113495bSYour Name 
2342*5113495bSYour Name 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2343*5113495bSYour Name 				sizeof(uint32_t),
2344*5113495bSYour Name 				pdev->rx_ring.alloc_idx.vaddr,
2345*5113495bSYour Name 				pdev->rx_ring.alloc_idx.paddr,
2346*5113495bSYour Name 				qdf_get_dma_mem_context((&pdev->rx_ring.
2347*5113495bSYour Name 							 alloc_idx),
2348*5113495bSYour Name 							 memctx));
2349*5113495bSYour Name 
2350*5113495bSYour Name 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2351*5113495bSYour Name 				pdev->rx_ring.size * sizeof(target_paddr_t),
2352*5113495bSYour Name 				pdev->rx_ring.buf.paddrs_ring,
2353*5113495bSYour Name 				pdev->rx_ring.base_paddr,
2354*5113495bSYour Name 				qdf_get_dma_mem_context((&pdev->rx_ring.buf),
2355*5113495bSYour Name 							memctx));
2356*5113495bSYour Name 
2357*5113495bSYour Name 	/* destroy the rx-parallelization refill spinlock */
2358*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->rx_ring.refill_lock);
2359*5113495bSYour Name }
2360*5113495bSYour Name 
2361*5113495bSYour Name static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
2362*5113495bSYour Name {
2363*5113495bSYour Name 	uint32_t i;
2364*5113495bSYour Name 	struct htt_rx_hash_entry *hash_entry;
2365*5113495bSYour Name 	struct htt_rx_hash_bucket **hash_table;
2366*5113495bSYour Name 	struct htt_list_node *list_iter = NULL;
2367*5113495bSYour Name 	qdf_mem_info_t mem_map_table = {0};
2368*5113495bSYour Name 	qdf_nbuf_t nbuf;
2369*5113495bSYour Name 	int ret;
2370*5113495bSYour Name 
2371*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
2372*5113495bSYour Name 	hash_table = pdev->rx_ring.hash_table;
2373*5113495bSYour Name 
2374*5113495bSYour Name 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
2375*5113495bSYour Name 		/* Update the SMMU mapping for each hash entry in bucket i */
2376*5113495bSYour Name 		list_iter = hash_table[i]->listhead.next;
2377*5113495bSYour Name 		while (list_iter != &hash_table[i]->listhead) {
2378*5113495bSYour Name 			hash_entry =
2379*5113495bSYour Name 				(struct htt_rx_hash_entry *)((char *)list_iter -
2380*5113495bSYour Name 							     pdev->rx_ring.
2381*5113495bSYour Name 							     listnode_offset);
2382*5113495bSYour Name 			nbuf = hash_entry->netbuf;
2383*5113495bSYour Name 			if (nbuf) {
2384*5113495bSYour Name 				if (qdf_unlikely(map ==
2385*5113495bSYour Name 					qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
2386*5113495bSYour Name 					qdf_err("map/unmap err:%d, nbuf:%pK",
2387*5113495bSYour Name 						map, nbuf);
2388*5113495bSYour Name 					list_iter = list_iter->next;
2389*5113495bSYour Name 					continue;
2390*5113495bSYour Name 				}
2391*5113495bSYour Name 				qdf_nbuf_set_rx_ipa_smmu_map(nbuf, map);
2392*5113495bSYour Name 				qdf_update_mem_map_table(pdev->osdev,
2393*5113495bSYour Name 						&mem_map_table,
2394*5113495bSYour Name 						QDF_NBUF_CB_PADDR(nbuf),
2395*5113495bSYour Name 						HTT_RX_BUF_SIZE);
2396*5113495bSYour Name 				ret = cds_smmu_map_unmap(map, 1,
2397*5113495bSYour Name 							 &mem_map_table);
2398*5113495bSYour Name 				if (ret) {
2399*5113495bSYour Name 					qdf_nbuf_set_rx_ipa_smmu_map(nbuf,
2400*5113495bSYour Name 								     !map);
2401*5113495bSYour Name 					qdf_err("map: %d failure, nbuf: %pK",
2402*5113495bSYour Name 						map, nbuf);
2403*5113495bSYour Name 					qdf_spin_unlock_bh(
2404*5113495bSYour Name 						&pdev->rx_ring.rx_hash_lock);
2405*5113495bSYour Name 					return QDF_STATUS_E_FAILURE;
2406*5113495bSYour Name 				}
2407*5113495bSYour Name 			}
2408*5113495bSYour Name 			list_iter = list_iter->next;
2409*5113495bSYour Name 		}
2410*5113495bSYour Name 	}
2411*5113495bSYour Name 
2412*5113495bSYour Name 	pdev->rx_ring.smmu_map = map;
2413*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
2414*5113495bSYour Name 
2415*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2416*5113495bSYour Name }
2417*5113495bSYour Name 
2418*5113495bSYour Name QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map)
2419*5113495bSYour Name {
2420*5113495bSYour Name 	QDF_STATUS status;
2421*5113495bSYour Name 
2422*5113495bSYour Name 	if (!pdev->rx_ring.hash_table)
2423*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
2424*5113495bSYour Name 
2425*5113495bSYour Name 	if (!qdf_mem_smmu_s1_enabled(pdev->osdev) || !pdev->is_ipa_uc_enabled)
2426*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
2427*5113495bSYour Name 
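	/*
	 * Hold the refill lock so no new buffers are posted to the rx
	 * ring while the hash-table entries are being (un)mapped.
	 */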
2428*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
2429*5113495bSYour Name 	status = htt_rx_hash_smmu_map(map, pdev);
2430*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
2431*5113495bSYour Name 
2432*5113495bSYour Name 	return status;
2433*5113495bSYour Name }
2434