/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_li_rx.h"
#include "hal_api.h"
#include "hal_li_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#include "dp_li.h"

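/**
 * is_sa_da_idx_valid() - check if the SA/DA AST indices are within bounds
 * @max_ast: maximum AST index configured for this soc
 * @nbuf: pkt buffer whose sa/da valid flags were set during reaping
 * @msdu_info: msdu metadata carrying the sa_idx/da_idx from the RX TLVs
 *
 * Return: true if neither valid index exceeds max_ast, false otherwise
 */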
static inline
bool is_sa_da_idx_valid(uint32_t max_ast,
			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
{
	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
	     (msdu_info.da_idx > max_ast)))
		return false;

	return true;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
/**
 * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
 * @soc: core DP main context
 * @txrx_peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
}
#else
static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
					   struct dp_txrx_peer *txrx_peer,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
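/**
 * dp_rx_intrabss_ucast_check_li() - check if intra-BSS unicast fwd is allowed
 * @soc: core DP main context
 * @nbuf: pkt buffer to be forwarded
 * @ta_txrx_peer: transmitter (source) peer entry
 * @msdu_metadata: msdu metadata carrying the DA AST index
 * @p_tx_vdev_id: filled with the vdev id of the DA peer on success
 *
 * Return: true if the frame is a candidate for intra-BSS unicast
 *	   forwarding, false if it must go up the network stack
 */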
static bool
dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      struct dp_txrx_peer *ta_txrx_peer,
			      struct hal_rx_msdu_metadata *msdu_metadata,
			      uint8_t *p_tx_vdev_id)
{
	uint16_t da_peer_id;
	struct dp_txrx_peer *da_peer;
	struct dp_ast_entry *ast_entry;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
		return false;

	ast_entry = soc->ast_table[msdu_metadata->da_idx];
	if (!ast_entry)
		return false;

	if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
		ast_entry->is_active = TRUE;
		return false;
	}

	da_peer_id = ast_entry->peer_id;
	/* The TA peer cannot be the same as the peer (DA) on which the
	 * AST is present; that indicates a change in topology and that
	 * the AST entries are yet to be updated.
	 */
	if (da_peer_id == ta_txrx_peer->peer_id ||
	    da_peer_id == HTT_INVALID_PEER)
		return false;

	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
					     &txrx_ref_handle, DP_MOD_ID_RX);
	if (!da_peer)
		return false;

	*p_tx_vdev_id = da_peer->vdev->vdev_id;
	/* If the source or destination peer is in the isolation list,
	 * don't forward; instead push the frame to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_txrx_peer) ||
	    dp_get_peer_isolation(da_peer) ||
	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	if (da_peer->bss_peer) {
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
		return false;
	}

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	return true;
}

/**
 * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata extracted from the rx tlvs
 * @tid_stats: per-TID rx statistics to update
 *
 * Return: true if the frame is forwarded, else false
 */
static bool
dp_rx_intrabss_fwd_li(struct dp_soc *soc,
		      struct dp_txrx_peer *ta_txrx_peer,
		      uint8_t *rx_tlv_hdr,
		      qdf_nbuf_t nbuf,
		      struct hal_rx_msdu_metadata msdu_metadata,
		      struct cdp_tid_rx_stats *tid_stats)
{
	uint8_t tx_vdev_id;

	/* If it is a broadcast pkt (eg: ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and the original pkt up the network stack.
	 * Note on multicast pkts: either forward all multicast pkts as is,
	 * or let a higher layer module like igmpsnoop decide whether to
	 * forward or not with Mcast enhancement.
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
					       nbuf, tid_stats, 0);

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
					  &msdu_metadata, &tx_vdev_id))
		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
						rx_tlv_hdr, nbuf, tid_stats,
						0);

	return false;
}
#endif

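/**
 * dp_rx_process_li() - LI specific REO destination ring reap/process handler
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: HAL handle of the REO destination ring
 * @reo_ring_num: REO destination ring number on which reaping happens
 * @quota: upper bound on the number of descriptors serviced in one call
 *
 * Reaps buffers from the REO destination ring, links them into a local
 * list and then delivers them to the stack on a per vdev/peer basis.
 * Note: on LI targets this is expected to be hooked in as the
 * dp_rx_process arch ops handler.
 *
 * Return: number of rx buffers processed (reaped and delivered)
 */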
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	enum hal_reo_error_status error;
	uint32_t peer_mdata;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	struct hal_rx_msdu_metadata msdu_metadata;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	int max_reap_limit;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t max_ast;
	uint64_t current_time = 0;
	uint16_t buf_size;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	max_ast = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);

	if (num_pending > quota)
		num_pending = quota;

	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
						       num_pending);

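	/*
	 * Cache the cfg fields consulted for every packet once per reap
	 * pass instead of dereferencing wlan_cfg in the hot loop.
	 */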
	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/*
	 * start reaping the buffers from reo ring and queue
	 * them in per vdev queue.
	 * Process the received pkts in a different per vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
							&head[rx_desc->pool_id],
							&tail[rx_desc->pool_id],
							rx_desc);
			}
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info);

		/* Get MSDU DESC info */
		hal_rx_msdu_desc_info_get_li(ring_desc, &msdu_desc_info);

		if (qdf_unlikely(msdu_desc_info.msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			/* previous msdu has end bit set, so current one is
			 * the new MPDU
			 */
			if (is_prev_msdu_last) {
				/* For new MPDU check if we can read complete
				 * MPDU by comparing the number of buffers
				 * available and number of buffers needed to
				 * reap this MPDU
				 */
				if ((msdu_desc_info.msdu_len /
				     (buf_size -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								     ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form complete SG, we need to
					 * reset the TP in the REO destination
					 * ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU))
			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

		if (!is_prev_msdu_last &&
		    !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->pool_id]++;
		peer_mdata = mpdu_desc_info.peer_meta_data;
		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
			dp_rx_peer_metadata_peer_id_get_li(soc, peer_mdata);
		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
			DP_PEER_METADATA_VDEV_ID_GET_LI(peer_mdata);

		/* to indicate whether this msdu is rx offload */
		pkt_capture_offload =
			DP_PEER_METADATA_OFFLOAD_GET_LI(peer_mdata);

		/*
		 * save msdu flags first, last and continuation msdu in
		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
		 * length to nbuf->cb. This ensures the info required for
		 * per pkt processing is always in the same cache line.
		 * This helps in improving throughput for smaller pkt
		 * sizes.
		 */
		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

		qdf_nbuf_set_tid_val(rx_desc->nbuf,
				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));

		/* set reo dest indication */
		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
				rx_desc->nbuf,
				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));

		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;

		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

		/*
		 * The unmap is done after the scattered-msdu wait-break
		 * logic above so that a double skb unmap cannot happen.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_desc(soc, hal_soc, num_pending,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		/*
		 * only if complete msdu is received for scatter case,
		 * then allow break.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		/*
		 * continue with next mac_id if no pkts were reaped
		 * from that pool
		 */
		if (!rx_bufs_reaped[mac_id])
			continue;

		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[mac_id],
					       &head[mac_id], &tail[mac_id]);
	}

	dp_verbose_debug("replenished %u", rx_bufs_reaped[0]);
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from global queue,
	 * processed and queued back on a per vdev basis. These nbufs
	 * are sent to stack as and when we run out of nbufs
	 * or a new nbuf dequeued from global queue has a different
	 * vdev when compared to previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data(nbuf, next);

		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
			tid = qdf_nbuf_get_tid_val(nbuf);

		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer =
			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
						     &txrx_ref_handle,
						     pkt_capture_offload,
						     &vdev,
						     &rx_pdev, &dsf,
						     &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				nbuf = next;
				continue;
			}
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
					DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(nbuf))) {
			if (qdf_unlikely(!hal_rx_attn_msdu_done_get_li(
								 rx_tlv_hdr))) {
				dp_err_rl("MSDU DONE failure");
				DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
				hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
						     QDF_TRACE_LEVEL_INFO);
				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
				qdf_assert(0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
								 rx_tlv_hdr))) {
				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 fragmented pkts are reinjected to the REO
		 * HW block as SG pkts, and for these pkts we only
		 * need to pull the RX TLVS header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * for these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len,
							      0);
			} else {
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

				if (!dp_rx_is_sg_supported()) {
					dp_rx_nbuf_free(nbuf);
					dp_info_rl("sg msdu len %d, dropped",
						   msdu_len);
					nbuf = next;
					continue;
				}
			}
		} else {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len +
				  msdu_metadata.l3_hdr_pad +
				  soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		/*
		 * process frame for multipass phase processing
		 */
		if (qdf_unlikely(vdev->multipass_en)) {
			if (dp_rx_multipass_process(txrx_peer, nbuf,
						    tid) == false) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.multipass_rx_pkt_drop,
							  1, 0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop,
						  1, 0);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
								rx_tlv_hdr) ==
				  false))) {
			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.nawds_mcast_drop,
						  1, 0);
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1, 0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		if (soc->process_rx_status)
			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  reo_ring_num, false, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats, 0);

		if (qdf_unlikely(vdev->mesh_vdev)) {
			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
					== QDF_STATUS_SUCCESS) {
				dp_rx_info("%pK: mesh pkt filtered", soc);
				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
					     1);

				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
					      txrx_peer);
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Due to a HW issue, sometimes the sa_idx and da_idx
			 * are invalid even though the sa_valid and da_valid
			 * bits are set.
			 *
			 * In this case the value of sa_sw_peer_id is also
			 * seen as 0.
			 *
			 * Drop the packet if sa_idx or da_idx is OOB or
			 * sa_sw_peerid is 0
			 */
			if (!is_sa_da_idx_valid(max_ast, nbuf,
						msdu_metadata)) {
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
				continue;
			}
			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
								 txrx_peer,
								 rx_tlv_hdr,
								 nbuf))) {
				/* this is a looped back MCBC pkt, drop it */
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.mec_drop, 1,
							      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
							      0);
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
			/* WDS Source Port Learning */
			if (qdf_likely(vdev->wds_enabled))
				dp_rx_wds_srcport_learn(soc,
							rx_tlv_hdr,
							txrx_peer,
							nbuf,
							msdu_metadata);

			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_li(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  msdu_metadata,
							  tid_stats)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
					   QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
					      rx.rx_success, 1,
					      QDF_NBUF_CB_RX_PKT_LEN(nbuf), 0);
		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
						      0);

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdevs */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}

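/* LI needs no pool-specific rx desc setup; defer to the generic init */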
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id)
{
	return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}

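/* No LI specific deinit is required; the generic teardown covers it */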
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

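/**
 * dp_wbm_get_rx_desc_from_hal_desc_li() - get the SW rx descriptor referred
 *					   to by a WBM release ring descriptor
 * @soc: core DP main context
 * @ring_desc: WBM release ring descriptor from which cookie/rbm are read
 * @r_rx_desc: filled with the SW rx descriptor looked up from the cookie
 *
 * Return: QDF_STATUS_SUCCESS on a valid rbm and cookie, else
 *	   QDF_STATUS_E_INVAL
 */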
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
					struct dp_soc *soc,
					void *ring_desc,
					struct dp_rx_desc **r_rx_desc)
{
	struct hal_buf_info buf_info = {0};
	hal_soc_handle_t hal_soc = soc->hal_soc;

	/* only cookie and rbm will be valid in buf_info */
	hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
				  &buf_info);

	if (qdf_unlikely(buf_info.rbm !=
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
		/* TODO */
		/* Call appropriate handler */
		DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
		dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
		return QDF_STATUS_E_INVAL;
	}

	if (!dp_rx_is_sw_cookie_valid(soc, buf_info.sw_cookie)) {
		dp_rx_err("invalid sw_cookie 0x%x", buf_info.sw_cookie);
		return QDF_STATUS_E_INVAL;
	}

	*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);

	return QDF_STATUS_SUCCESS;
}

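/**
 * dp_rx_chain_msdus_li() - chain msdus of an invalid-peer mpdu on the pdev
 * @soc: core DP main context
 * @nbuf: current msdu buffer
 * @rx_tlv_hdr: start of the rx TLV header of @nbuf
 * @mac_id: lmac id used to look up the pdev
 *
 * Return: true once the last msdu of the mpdu has been chained (mpdu done)
 */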
dp_rx_chain_msdus_li(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,uint8_t mac_id)1014*5113495bSYour Name bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
1015*5113495bSYour Name 			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
1016*5113495bSYour Name {
1017*5113495bSYour Name 	bool mpdu_done = false;
1018*5113495bSYour Name 	qdf_nbuf_t curr_nbuf = NULL;
1019*5113495bSYour Name 	qdf_nbuf_t tmp_nbuf = NULL;
1020*5113495bSYour Name 
1021*5113495bSYour Name 	/* TODO: Currently only single radio is supported, hence
1022*5113495bSYour Name 	 * pdev hard coded to '0' index
1023*5113495bSYour Name 	 */
1024*5113495bSYour Name 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1025*5113495bSYour Name 
1026*5113495bSYour Name 	if (!dp_pdev) {
1027*5113495bSYour Name 		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
1028*5113495bSYour Name 		return mpdu_done;
1029*5113495bSYour Name 	}
1030*5113495bSYour Name 	/* if invalid peer SG list has max values free the buffers in list
1031*5113495bSYour Name 	 * and treat current buffer as start of list
1032*5113495bSYour Name 	 *
1033*5113495bSYour Name 	 * current logic to detect the last buffer from attn_tlv is not reliable
1034*5113495bSYour Name 	 * in OFDMA UL scenario hence add max buffers check to avoid list pile
1035*5113495bSYour Name 	 * up
1036*5113495bSYour Name 	 */
1037*5113495bSYour Name 	if (!dp_pdev->first_nbuf ||
1038*5113495bSYour Name 	    (dp_pdev->invalid_peer_head_msdu &&
1039*5113495bSYour Name 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
1040*5113495bSYour Name 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
1041*5113495bSYour Name 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1042*5113495bSYour Name 		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
1043*5113495bSYour Name 						      rx_tlv_hdr);
1044*5113495bSYour Name 		dp_pdev->first_nbuf = true;
1045*5113495bSYour Name 
1046*5113495bSYour Name 		/* If the new nbuf received is the first msdu of the
1047*5113495bSYour Name 		 * amsdu and there are msdus in the invalid peer msdu
1048*5113495bSYour Name 		 * list, then let us free all the msdus of the invalid
1049*5113495bSYour Name 		 * peer msdu list.
1050*5113495bSYour Name 		 * This scenario can happen when we start receiving
1051*5113495bSYour Name 		 * new a-msdu even before the previous a-msdu is completely
1052*5113495bSYour Name 		 * received.
1053*5113495bSYour Name 		 */
1054*5113495bSYour Name 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
1055*5113495bSYour Name 		while (curr_nbuf) {
1056*5113495bSYour Name 			tmp_nbuf = curr_nbuf->next;
1057*5113495bSYour Name 			dp_rx_nbuf_free(curr_nbuf);
1058*5113495bSYour Name 			curr_nbuf = tmp_nbuf;
1059*5113495bSYour Name 		}
1060*5113495bSYour Name 
1061*5113495bSYour Name 		dp_pdev->invalid_peer_head_msdu = NULL;
1062*5113495bSYour Name 		dp_pdev->invalid_peer_tail_msdu = NULL;
1063*5113495bSYour Name 
1064*5113495bSYour Name 		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
1065*5113495bSYour Name 	}
1066*5113495bSYour Name 
1067*5113495bSYour Name 	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(soc->hal_soc,
1068*5113495bSYour Name 							    rx_tlv_hdr) &&
1069*5113495bSYour Name 	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1070*5113495bSYour Name 		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1071*5113495bSYour Name 		qdf_assert_always(dp_pdev->first_nbuf);
1072*5113495bSYour Name 		dp_pdev->first_nbuf = false;
1073*5113495bSYour Name 		mpdu_done = true;
1074*5113495bSYour Name 	}
1075*5113495bSYour Name 
1076*5113495bSYour Name 	/*
1077*5113495bSYour Name 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
1078*5113495bSYour Name 	 * should be NULL here; add this check for debugging purposes,
1079*5113495bSYour Name 	 * in case of some corner case.
1080*5113495bSYour Name 	 */
1081*5113495bSYour Name 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
1082*5113495bSYour Name 					dp_pdev->invalid_peer_tail_msdu);
1083*5113495bSYour Name 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
1084*5113495bSYour Name 			  dp_pdev->invalid_peer_tail_msdu,
1085*5113495bSYour Name 			  nbuf);
1086*5113495bSYour Name 
1087*5113495bSYour Name 	return mpdu_done;
1088*5113495bSYour Name }
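/*
 * Illustrative sketch, not part of the driver: the invalid-peer MSDU
 * handling above is essentially a capped singly linked list -- append
 * each buffer, and once the cap (DP_MAX_INVALID_BUFFERS) is reached,
 * flush the list and treat the next buffer as a fresh start. All types
 * and names below are hypothetical and exist only to show the pattern
 * in isolation.
 */
#if 0
struct sketch_buf {
	struct sketch_buf *next;
};

struct sketch_list {
	struct sketch_buf *head;
	struct sketch_buf *tail;
	unsigned int count;
	unsigned int max;	/* analogous to DP_MAX_INVALID_BUFFERS */
};

static void sketch_list_flush(struct sketch_list *l,
			      void (*free_buf)(struct sketch_buf *))
{
	struct sketch_buf *cur = l->head, *next;

	while (cur) {
		next = cur->next;
		free_buf(cur);
		cur = next;
	}
	l->head = NULL;
	l->tail = NULL;
	l->count = 0;
}

static void sketch_list_append(struct sketch_list *l, struct sketch_buf *b,
			       void (*free_buf)(struct sketch_buf *))
{
	/* Cap reached: drop the stale chain, start a new one with b */
	if (l->count >= l->max)
		sketch_list_flush(l, free_buf);

	b->next = NULL;
	if (!l->head)
		l->head = b;
	else
		l->tail->next = b;
	l->tail = b;
	l->count++;
}
#endif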
1089*5113495bSYour Name 
dp_rx_replenish_soc_get_li(struct dp_soc * soc,uint8_t chip_id)1090*5113495bSYour Name static struct dp_soc *dp_rx_replenish_soc_get_li(struct dp_soc *soc,
1091*5113495bSYour Name 						 uint8_t chip_id)
1092*5113495bSYour Name {
1093*5113495bSYour Name 	return soc;
1094*5113495bSYour Name }
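/*
 * Hedged note with sketch, not part of the driver: Li targets are
 * single-chip, so the getter above ignores chip_id and returns the soc
 * it was given. A hypothetical multi-chip variant would instead index a
 * per-chip table, roughly as below (all names are made up):
 */
#if 0
struct sketch_multi_chip_ctx {
	struct dp_soc *soc_by_chip[2];	/* hypothetical per-chip table */
};

static struct dp_soc *
sketch_replenish_soc_get(struct sketch_multi_chip_ctx *ctx, uint8_t chip_id)
{
	return ctx->soc_by_chip[chip_id];
}
#endif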
1095*5113495bSYour Name 
1096*5113495bSYour Name qdf_nbuf_t
dp_rx_wbm_err_reap_desc_li(struct dp_intr * int_ctx,struct dp_soc * soc,hal_ring_handle_t hal_ring_hdl,uint32_t quota,uint32_t * rx_bufs_used)1097*5113495bSYour Name dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
1098*5113495bSYour Name 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
1099*5113495bSYour Name 			   uint32_t *rx_bufs_used)
1100*5113495bSYour Name {
1101*5113495bSYour Name 	hal_ring_desc_t ring_desc;
1102*5113495bSYour Name 	hal_soc_handle_t hal_soc;
1103*5113495bSYour Name 	struct dp_rx_desc *rx_desc;
1104*5113495bSYour Name 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
1105*5113495bSYour Name 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
1106*5113495bSYour Name 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
1107*5113495bSYour Name 	uint8_t buf_type;
1108*5113495bSYour Name 	uint8_t mac_id;
1109*5113495bSYour Name 	struct dp_srng *dp_rxdma_srng;
1110*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
1111*5113495bSYour Name 	qdf_nbuf_t nbuf_head = NULL;
1112*5113495bSYour Name 	qdf_nbuf_t nbuf_tail = NULL;
1113*5113495bSYour Name 	qdf_nbuf_t nbuf;
1114*5113495bSYour Name 	union hal_wbm_err_info_u wbm_err_info = { 0 };
1115*5113495bSYour Name 	uint8_t msdu_continuation = 0;
1116*5113495bSYour Name 	bool process_sg_buf = false;
1117*5113495bSYour Name 	uint32_t wbm_err_src;
1118*5113495bSYour Name 	QDF_STATUS status;
1119*5113495bSYour Name 	struct dp_soc *replenish_soc;
1120*5113495bSYour Name 	uint8_t chip_id = 0;
1121*5113495bSYour Name 	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
1122*5113495bSYour Name 	uint8_t *rx_tlv_hdr;
1123*5113495bSYour Name 	uint32_t peer_mdata;
1124*5113495bSYour Name 
1125*5113495bSYour Name 	qdf_assert(soc && hal_ring_hdl);
1126*5113495bSYour Name 	hal_soc = soc->hal_soc;
1127*5113495bSYour Name 	qdf_assert(hal_soc);
1128*5113495bSYour Name 
1129*5113495bSYour Name 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1130*5113495bSYour Name 		/* TODO */
1131*5113495bSYour Name 		/*
1132*5113495bSYour Name 		 * Need API to convert from hal_ring pointer to
1133*5113495bSYour Name 		 * Ring Type / Ring Id combo
1134*5113495bSYour Name 		 */
1135*5113495bSYour Name 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
1136*5113495bSYour Name 			      soc, hal_ring_hdl);
1137*5113495bSYour Name 		goto done;
1138*5113495bSYour Name 	}
1139*5113495bSYour Name 
1140*5113495bSYour Name 	while (qdf_likely(quota)) {
1141*5113495bSYour Name 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1142*5113495bSYour Name 		if (qdf_unlikely(!ring_desc))
1143*5113495bSYour Name 			break;
1144*5113495bSYour Name 
1145*5113495bSYour Name 		/* XXX */
1146*5113495bSYour Name 		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
1147*5113495bSYour Name 
1148*5113495bSYour Name 		if (dp_assert_always_internal_stat(
1149*5113495bSYour Name 				buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF,
1150*5113495bSYour Name 				soc, rx.err.wbm_err_buf_rel_type))
1151*5113495bSYour Name 			continue;
1152*5113495bSYour Name 
1153*5113495bSYour Name 		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
1154*5113495bSYour Name 		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
1155*5113495bSYour Name 			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
1156*5113495bSYour Name 
1157*5113495bSYour Name 		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
1158*5113495bSYour Name 								   ring_desc,
1159*5113495bSYour Name 								   &rx_desc)) {
1160*5113495bSYour Name 			dp_rx_err_err("get rx desc from hal_desc failed");
1161*5113495bSYour Name 			continue;
1162*5113495bSYour Name 		}
1163*5113495bSYour Name 
1164*5113495bSYour Name 		if (dp_assert_always_internal_stat(rx_desc, soc,
1165*5113495bSYour Name 						   rx.err.rx_desc_null))
1166*5113495bSYour Name 			continue;
1167*5113495bSYour Name 
1168*5113495bSYour Name 		if (!dp_rx_desc_check_magic(rx_desc)) {
1169*5113495bSYour Name 			dp_rx_err_err("%pK: Invalid rx_desc %pK",
1170*5113495bSYour Name 				      soc, rx_desc);
1171*5113495bSYour Name 			continue;
1172*5113495bSYour Name 		}
1173*5113495bSYour Name 
1174*5113495bSYour Name 		/*
1175*5113495bSYour Name 		 * This is an unlikely scenario where the host is reaping
1176*5113495bSYour Name 		 * a descriptor that it already reaped just a while ago
1177*5113495bSYour Name 		 * but has yet to replenish back to HW.
1178*5113495bSYour Name 		 * In this case the host will dump the last 128 descriptors,
1179*5113495bSYour Name 		 * including the software descriptor rx_desc, and assert.
1180*5113495bSYour Name 		 */
1181*5113495bSYour Name 		if (qdf_unlikely(!rx_desc->in_use)) {
1182*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
1183*5113495bSYour Name 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1184*5113495bSYour Name 						   ring_desc, rx_desc);
1185*5113495bSYour Name 			continue;
1186*5113495bSYour Name 		}
1187*5113495bSYour Name 
1188*5113495bSYour Name 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info.info_bit,
1189*5113495bSYour Name 					hal_soc);
1190*5113495bSYour Name 		nbuf = rx_desc->nbuf;
1191*5113495bSYour Name 
1192*5113495bSYour Name 		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
1193*5113495bSYour Name 							  ring_desc, rx_desc);
1194*5113495bSYour Name 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
1195*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1196*5113495bSYour Name 			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
1197*5113495bSYour Name 				   nbuf);
1198*5113495bSYour Name 			rx_desc->in_err_state = 1;
1199*5113495bSYour Name 			continue;
1200*5113495bSYour Name 		}
1201*5113495bSYour Name 
1202*5113495bSYour Name 		/* Update peer_id in nbuf cb */
1203*5113495bSYour Name 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
1204*5113495bSYour Name 		peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
1205*5113495bSYour Name 							   rx_tlv_hdr);
1206*5113495bSYour Name 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
1207*5113495bSYour Name 			dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
1208*5113495bSYour Name 
1209*5113495bSYour Name 		/* Get MPDU DESC info */
1210*5113495bSYour Name 		hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
1211*5113495bSYour Name 
1212*5113495bSYour Name 		if (qdf_likely(mpdu_desc_info.mpdu_flags &
1213*5113495bSYour Name 			       HAL_MPDU_F_QOS_CONTROL_VALID))
1214*5113495bSYour Name 			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
1215*5113495bSYour Name 
1216*5113495bSYour Name 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1217*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1218*5113495bSYour Name 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1219*5113495bSYour Name 		rx_desc->unmapped = 1;
1220*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1221*5113495bSYour Name 
1222*5113495bSYour Name 		if (qdf_unlikely(
1223*5113495bSYour Name 		    soc->wbm_release_desc_rx_sg_support &&
1224*5113495bSYour Name 		    dp_rx_is_sg_formation_required(&wbm_err_info.info_bit))) {
1225*5113495bSYour Name 			/* SG is detected from continuation bit */
1226*5113495bSYour Name 			msdu_continuation =
1227*5113495bSYour Name 				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
1228*5113495bSYour Name 								     ring_desc);
1229*5113495bSYour Name 			if (msdu_continuation &&
1230*5113495bSYour Name 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
1231*5113495bSYour Name 				/* Update length from first buffer in SG */
1232*5113495bSYour Name 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
1233*5113495bSYour Name 					hal_rx_msdu_start_msdu_len_get(
1234*5113495bSYour Name 						soc->hal_soc,
1235*5113495bSYour Name 						qdf_nbuf_data(nbuf));
1236*5113495bSYour Name 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
1237*5113495bSYour Name 									true;
1238*5113495bSYour Name 			}
1239*5113495bSYour Name 
1240*5113495bSYour Name 			if (msdu_continuation) {
1241*5113495bSYour Name 				/* MSDU continued packets */
1242*5113495bSYour Name 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
1243*5113495bSYour Name 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
1244*5113495bSYour Name 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1245*5113495bSYour Name 			} else {
1246*5113495bSYour Name 				/* This is the terminal packet in SG */
1247*5113495bSYour Name 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1248*5113495bSYour Name 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1249*5113495bSYour Name 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
1250*5113495bSYour Name 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1251*5113495bSYour Name 				process_sg_buf = true;
1252*5113495bSYour Name 			}
1253*5113495bSYour Name 		}
1254*5113495bSYour Name 
1255*5113495bSYour Name 		/*
1256*5113495bSYour Name 		 * Save the wbm desc info in the nbuf CB/TLV. We will need
1257*5113495bSYour Name 		 * this info when we do the actual nbuf processing.
1258*5113495bSYour Name 		 */
1259*5113495bSYour Name 		wbm_err_info.info_bit.pool_id = rx_desc->pool_id;
1260*5113495bSYour Name 		dp_rx_set_wbm_err_info_in_nbuf(soc, nbuf, wbm_err_info);
1261*5113495bSYour Name 
1262*5113495bSYour Name 		rx_bufs_reaped[rx_desc->pool_id]++;
1263*5113495bSYour Name 
1264*5113495bSYour Name 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
1265*5113495bSYour Name 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
1266*5113495bSYour Name 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
1267*5113495bSYour Name 					  nbuf);
1268*5113495bSYour Name 			if (process_sg_buf) {
1269*5113495bSYour Name 				if (!dp_rx_buffer_pool_refill(
1270*5113495bSYour Name 					soc,
1271*5113495bSYour Name 					soc->wbm_sg_param.wbm_sg_nbuf_head,
1272*5113495bSYour Name 					rx_desc->pool_id))
1273*5113495bSYour Name 					DP_RX_MERGE_TWO_LIST(
1274*5113495bSYour Name 					  nbuf_head, nbuf_tail,
1275*5113495bSYour Name 					  soc->wbm_sg_param.wbm_sg_nbuf_head,
1276*5113495bSYour Name 					  soc->wbm_sg_param.wbm_sg_nbuf_tail);
1277*5113495bSYour Name 				dp_rx_wbm_sg_list_last_msdu_war(soc);
1278*5113495bSYour Name 				dp_rx_wbm_sg_list_reset(soc);
1279*5113495bSYour Name 				process_sg_buf = false;
1280*5113495bSYour Name 			}
1281*5113495bSYour Name 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
1282*5113495bSYour Name 						     rx_desc->pool_id)) {
1283*5113495bSYour Name 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
1284*5113495bSYour Name 		}
1285*5113495bSYour Name 
1286*5113495bSYour Name 		dp_rx_add_to_free_desc_list
1287*5113495bSYour Name 			(&head[rx_desc->pool_id],
1288*5113495bSYour Name 			 &tail[rx_desc->pool_id], rx_desc);
1289*5113495bSYour Name 
1290*5113495bSYour Name 		/*
1291*5113495bSYour Name 		 * If the continuation bit is set, the MSDU is spread across
1292*5113495bSYour Name 		 * multiple buffers; do not decrement the quota until all
1293*5113495bSYour Name 		 * buffers of that MSDU are reaped (see the sketch below).
1294*5113495bSYour Name 		 */
1295*5113495bSYour Name 		if (qdf_likely(!msdu_continuation))
1296*5113495bSYour Name 			quota -= 1;
1297*5113495bSYour Name 	}
1298*5113495bSYour Name done:
1299*5113495bSYour Name 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1300*5113495bSYour Name 
1301*5113495bSYour Name 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1302*5113495bSYour Name 		/*
1303*5113495bSYour Name 		 * continue with next mac_id if no pkts were reaped
1304*5113495bSYour Name 		 * from that pool
1305*5113495bSYour Name 		 */
1306*5113495bSYour Name 		if (!rx_bufs_reaped[mac_id])
1307*5113495bSYour Name 			continue;
1308*5113495bSYour Name 
1309*5113495bSYour Name 		replenish_soc = dp_rx_replenish_soc_get_li(soc, chip_id);
1311*5113495bSYour Name 		dp_rxdma_srng =
1312*5113495bSYour Name 			&replenish_soc->rx_refill_buf_ring[mac_id];
1313*5113495bSYour Name 
1314*5113495bSYour Name 		rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
1315*5113495bSYour Name 
1316*5113495bSYour Name 		dp_rx_buffers_replenish_simple(
1317*5113495bSYour Name 					replenish_soc, mac_id,
1318*5113495bSYour Name 					dp_rxdma_srng,
1319*5113495bSYour Name 					rx_desc_pool,
1320*5113495bSYour Name 					rx_bufs_reaped[mac_id],
1321*5113495bSYour Name 					&head[mac_id],
1322*5113495bSYour Name 					&tail[mac_id]);
1323*5113495bSYour Name 		*rx_bufs_used += rx_bufs_reaped[mac_id];
1324*5113495bSYour Name 	}
1325*5113495bSYour Name 	return nbuf_head;
1326*5113495bSYour Name }
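/*
 * Illustrative sketch, not part of the driver: the quota accounting in
 * the reap loop above charges one unit per MSDU rather than per buffer.
 * While the msdu_continuation bit is set, additional buffers of the same
 * MSDU are reaped without consuming quota; quota only drops on the final
 * buffer. The names below are hypothetical.
 */
#if 0
struct sketch_entry {
	int msdu_continuation;	/* 1 while more buffers of this MSDU follow */
};

static unsigned int sketch_reap(struct sketch_entry *ring, unsigned int n,
				unsigned int quota)
{
	unsigned int i = 0, reaped = 0;

	while (quota && i < n) {
		struct sketch_entry *e = &ring[i++];

		reaped++;		/* every buffer is reaped ... */
		if (!e->msdu_continuation)
			quota--;	/* ... but quota drops once per MSDU */
	}

	return reaped;
}
#endif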
1327*5113495bSYour Name 
1328*5113495bSYour Name QDF_STATUS
dp_rx_null_q_desc_handle_li(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,uint8_t pool_id,struct dp_txrx_peer * txrx_peer,bool is_reo_exception,uint8_t link_id)1329*5113495bSYour Name dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
1330*5113495bSYour Name 			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
1331*5113495bSYour Name 			    struct dp_txrx_peer *txrx_peer,
1332*5113495bSYour Name 			    bool is_reo_exception,
1333*5113495bSYour Name 			    uint8_t link_id)
1334*5113495bSYour Name {
1335*5113495bSYour Name 	uint32_t pkt_len;
1336*5113495bSYour Name 	uint16_t msdu_len;
1337*5113495bSYour Name 	struct dp_vdev *vdev;
1338*5113495bSYour Name 	uint8_t tid;
1339*5113495bSYour Name 	qdf_ether_header_t *eh;
1340*5113495bSYour Name 	struct hal_rx_msdu_metadata msdu_metadata;
1341*5113495bSYour Name 	uint16_t sa_idx = 0;
1342*5113495bSYour Name 	bool is_eapol = 0;
1343*5113495bSYour Name 	bool enh_flag;
1344*5113495bSYour Name 	uint16_t buf_size;
1345*5113495bSYour Name 
1346*5113495bSYour Name 	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
1347*5113495bSYour Name 
1348*5113495bSYour Name 	qdf_nbuf_set_rx_chfrag_start(
1349*5113495bSYour Name 				nbuf,
1350*5113495bSYour Name 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1351*5113495bSYour Name 							       rx_tlv_hdr));
1352*5113495bSYour Name 	qdf_nbuf_set_rx_chfrag_end(nbuf,
1353*5113495bSYour Name 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1354*5113495bSYour Name 								 rx_tlv_hdr));
1355*5113495bSYour Name 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1356*5113495bSYour Name 								  rx_tlv_hdr));
1357*5113495bSYour Name 	qdf_nbuf_set_da_valid(nbuf,
1358*5113495bSYour Name 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1359*5113495bSYour Name 							      rx_tlv_hdr));
1360*5113495bSYour Name 	qdf_nbuf_set_sa_valid(nbuf,
1361*5113495bSYour Name 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1362*5113495bSYour Name 							      rx_tlv_hdr));
1363*5113495bSYour Name 
1364*5113495bSYour Name 	tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
1365*5113495bSYour Name 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1366*5113495bSYour Name 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1367*5113495bSYour Name 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1368*5113495bSYour Name 
1369*5113495bSYour Name 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1370*5113495bSYour Name 		if (dp_rx_check_pkt_len(soc, pkt_len))
1371*5113495bSYour Name 			goto drop_nbuf;
1372*5113495bSYour Name 
1373*5113495bSYour Name 		/* Set length in nbuf */
1374*5113495bSYour Name 		qdf_nbuf_set_pktlen(nbuf, qdf_min(pkt_len, (uint32_t)buf_size));
1375*5113495bSYour Name 	}
1376*5113495bSYour Name 
1377*5113495bSYour Name 	/*
1378*5113495bSYour Name 	 * Check if DMA completed -- msdu_done is the last bit
1379*5113495bSYour Name 	 * to be written
1380*5113495bSYour Name 	 */
1381*5113495bSYour Name 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1382*5113495bSYour Name 		dp_err_rl("MSDU DONE failure");
1383*5113495bSYour Name 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1384*5113495bSYour Name 				     QDF_TRACE_LEVEL_INFO);
1385*5113495bSYour Name 		qdf_assert(0);
1386*5113495bSYour Name 	}
1387*5113495bSYour Name 
1388*5113495bSYour Name 	if (!txrx_peer &&
1389*5113495bSYour Name 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
1390*5113495bSYour Name 							  rx_tlv_hdr, nbuf))
1391*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1392*5113495bSYour Name 
1393*5113495bSYour Name 	if (!txrx_peer) {
1394*5113495bSYour Name 		bool mpdu_done = false;
1395*5113495bSYour Name 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
1396*5113495bSYour Name 
1397*5113495bSYour Name 		if (!pdev) {
1398*5113495bSYour Name 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
1399*5113495bSYour Name 			return QDF_STATUS_E_FAILURE;
1400*5113495bSYour Name 		}
1401*5113495bSYour Name 
1402*5113495bSYour Name 		dp_err_rl("txrx_peer is NULL");
1403*5113495bSYour Name 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1404*5113495bSYour Name 				 qdf_nbuf_len(nbuf));
1405*5113495bSYour Name 
1406*5113495bSYour Name 		/* QCN9000 has wbm_release_desc_rx_sg_support enabled */
1407*5113495bSYour Name 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
1408*5113495bSYour Name 			mpdu_done = true;
1409*5113495bSYour Name 			nbuf->next = NULL;
1410*5113495bSYour Name 			/* Trigger invalid peer handler wrapper */
1411*5113495bSYour Name 			dp_rx_process_invalid_peer_wrapper(soc,
1412*5113495bSYour Name 							   nbuf,
1413*5113495bSYour Name 							   mpdu_done,
1414*5113495bSYour Name 							   pool_id);
1415*5113495bSYour Name 		} else {
1416*5113495bSYour Name 			mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
1417*5113495bSYour Name 								    rx_tlv_hdr,
1418*5113495bSYour Name 								    pool_id);
1419*5113495bSYour Name 			/* Trigger invalid peer handler wrapper */
1420*5113495bSYour Name 			dp_rx_process_invalid_peer_wrapper(
1421*5113495bSYour Name 					soc,
1422*5113495bSYour Name 					pdev->invalid_peer_head_msdu,
1423*5113495bSYour Name 					mpdu_done, pool_id);
1424*5113495bSYour Name 		}
1425*5113495bSYour Name 
1426*5113495bSYour Name 		if (mpdu_done) {
1427*5113495bSYour Name 			pdev->invalid_peer_head_msdu = NULL;
1428*5113495bSYour Name 			pdev->invalid_peer_tail_msdu = NULL;
1429*5113495bSYour Name 		}
1430*5113495bSYour Name 
1431*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1432*5113495bSYour Name 	}
1433*5113495bSYour Name 
1434*5113495bSYour Name 	vdev = txrx_peer->vdev;
1435*5113495bSYour Name 	if (!vdev) {
1436*5113495bSYour Name 		dp_err_rl("Null vdev!");
1437*5113495bSYour Name 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1438*5113495bSYour Name 		goto drop_nbuf;
1439*5113495bSYour Name 	}
1440*5113495bSYour Name 
1441*5113495bSYour Name 	/*
1442*5113495bSYour Name 	 * Advance the packet start pointer by total size of
1443*5113495bSYour Name 	 * pre-header TLV's
1444*5113495bSYour Name 	 */
1445*5113495bSYour Name 	if (qdf_nbuf_is_frag(nbuf))
1446*5113495bSYour Name 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1447*5113495bSYour Name 	else
1448*5113495bSYour Name 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1449*5113495bSYour Name 				   soc->rx_pkt_tlv_size));
1450*5113495bSYour Name 
1451*5113495bSYour Name 	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
1452*5113495bSYour Name 
1453*5113495bSYour Name 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1454*5113495bSYour Name 
1455*5113495bSYour Name 	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
1456*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1,
1457*5113495bSYour Name 					  0);
1458*5113495bSYour Name 		goto drop_nbuf;
1459*5113495bSYour Name 	}
1460*5113495bSYour Name 
1461*5113495bSYour Name 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
1462*5113495bSYour Name 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
1463*5113495bSYour Name 
1464*5113495bSYour Name 		/* sa_idx is unsigned, so only the upper bound needs checking */
1465*5113495bSYour Name 		if (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1466*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
1467*5113495bSYour Name 			goto drop_nbuf;
1468*5113495bSYour Name 		}
1469*5113495bSYour Name 	}
1470*5113495bSYour Name 
1471*5113495bSYour Name 	if ((!soc->mec_fw_offload) &&
1472*5113495bSYour Name 	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
1473*5113495bSYour Name 		/* this is a looped back MCBC pkt, drop it */
1474*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1475*5113495bSYour Name 					      qdf_nbuf_len(nbuf), 0);
1476*5113495bSYour Name 		goto drop_nbuf;
1477*5113495bSYour Name 	}
1478*5113495bSYour Name 
1479*5113495bSYour Name 	/*
1480*5113495bSYour Name 	 * In qwrap mode, if the received packet matches any of the vdev
1481*5113495bSYour Name 	 * MAC addresses, drop it. Do not receive multicast packets
1482*5113495bSYour Name 	 * originating from any proxy STA.
1483*5113495bSYour Name 	 */
1484*5113495bSYour Name 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
1485*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
1486*5113495bSYour Name 					      qdf_nbuf_len(nbuf), 0);
1487*5113495bSYour Name 		goto drop_nbuf;
1488*5113495bSYour Name 	}
1489*5113495bSYour Name 
1490*5113495bSYour Name 	if (qdf_unlikely(txrx_peer->nawds_enabled &&
1491*5113495bSYour Name 			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1492*5113495bSYour Name 							rx_tlv_hdr))) {
1493*5113495bSYour Name 		dp_err_rl("free buffer for multicast packet");
1494*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1,
1495*5113495bSYour Name 					  0);
1496*5113495bSYour Name 		goto drop_nbuf;
1497*5113495bSYour Name 	}
1498*5113495bSYour Name 
1499*5113495bSYour Name 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
1500*5113495bSYour Name 		dp_err_rl("mcast Policy Check Drop pkt");
1501*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1,
1502*5113495bSYour Name 					  0);
1503*5113495bSYour Name 		goto drop_nbuf;
1504*5113495bSYour Name 	}
1505*5113495bSYour Name 	/* WDS Source Port Learning */
1506*5113495bSYour Name 	if (!soc->ast_offload_support &&
1507*5113495bSYour Name 	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
1508*5113495bSYour Name 		       vdev->wds_enabled))
1509*5113495bSYour Name 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
1510*5113495bSYour Name 					msdu_metadata);
1511*5113495bSYour Name 
1512*5113495bSYour Name 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
1513*5113495bSYour Name 		struct dp_peer *peer;
1514*5113495bSYour Name 		struct dp_rx_tid *rx_tid;
1515*5113495bSYour Name 
1516*5113495bSYour Name 		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
1517*5113495bSYour Name 					     DP_MOD_ID_RX_ERR);
1518*5113495bSYour Name 		if (peer) {
1519*5113495bSYour Name 			rx_tid = &peer->rx_tid[tid];
1520*5113495bSYour Name 			qdf_spin_lock_bh(&rx_tid->tid_lock);
1521*5113495bSYour Name 			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
1522*5113495bSYour Name 			/* For a mesh peer, if the mesh peer is not deleted
1523*5113495bSYour Name 			 * on one of the mesh APs, a later addition of that
1524*5113495bSYour Name 			 * mesh peer on another mesh AP does not do BA
1525*5113495bSYour Name 			 * negotiation, leading to a mismatch in BA windows.
1526*5113495bSYour Name 			 * To avoid this, send the max BA window during init.
1527*5113495bSYour Name 			 */
1528*5113495bSYour Name 				if (qdf_unlikely(vdev->mesh_vdev) ||
1529*5113495bSYour Name 				    qdf_unlikely(txrx_peer->nawds_enabled))
1530*5113495bSYour Name 					dp_rx_tid_setup_wifi3(
1531*5113495bSYour Name 						peer, BIT(tid),
1532*5113495bSYour Name 						hal_get_rx_max_ba_window(soc->hal_soc, tid),
1533*5113495bSYour Name 						IEEE80211_SEQ_MAX);
1534*5113495bSYour Name 				else
1535*5113495bSYour Name 					dp_rx_tid_setup_wifi3(peer, BIT(tid), 1,
1536*5113495bSYour Name 							      IEEE80211_SEQ_MAX);
1537*5113495bSYour Name 			}
1538*5113495bSYour Name 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1539*5113495bSYour Name 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
1540*5113495bSYour Name 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1541*5113495bSYour Name 		}
1542*5113495bSYour Name 	}
1543*5113495bSYour Name 
1544*5113495bSYour Name 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1545*5113495bSYour Name 
1546*5113495bSYour Name 	if (!txrx_peer->authorize) {
1547*5113495bSYour Name 		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
1548*5113495bSYour Name 
1549*5113495bSYour Name 		if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1550*5113495bSYour Name 			if (!dp_rx_err_match_dhost(eh, vdev))
1551*5113495bSYour Name 				goto drop_nbuf;
1552*5113495bSYour Name 		} else {
1553*5113495bSYour Name 			goto drop_nbuf;
1554*5113495bSYour Name 		}
1555*5113495bSYour Name 	}
1556*5113495bSYour Name 
1557*5113495bSYour Name 	/*
1558*5113495bSYour Name 	 * Drop packets in this path if cce_match is found. Packets come
1559*5113495bSYour Name 	 * through the following paths depending on whether the tidQ is set up:
1560*5113495bSYour Name 	 * 1. If tidQ is set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
1561*5113495bSYour Name 	 *    cce_match = 1
1562*5113495bSYour Name 	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
1563*5113495bSYour Name 	 *    dropped.
1564*5113495bSYour Name 	 * 2. If tidQ is not set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
1565*5113495bSYour Name 	 *    cce_match = 1
1566*5113495bSYour Name 	 *    These packets need to be dropped and should not be delivered
1567*5113495bSYour Name 	 *    to the stack.
1568*5113495bSYour Name 	 */
1569*5113495bSYour Name 	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
1570*5113495bSYour Name 		goto drop_nbuf;
1571*5113495bSYour Name 
1572*5113495bSYour Name 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1573*5113495bSYour Name 		qdf_nbuf_set_raw_frame(nbuf, 1);
1574*5113495bSYour Name 		qdf_nbuf_set_next(nbuf, NULL);
1575*5113495bSYour Name 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, 0);
1576*5113495bSYour Name 	} else {
1577*5113495bSYour Name 		enh_flag = vdev->pdev->enhanced_stats_en;
1578*5113495bSYour Name 		qdf_nbuf_set_next(nbuf, NULL);
1579*5113495bSYour Name 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1580*5113495bSYour Name 					  enh_flag);
1581*5113495bSYour Name 
1582*5113495bSYour Name 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
1583*5113495bSYour Name 					      rx.rx_success, 1,
1584*5113495bSYour Name 					      qdf_nbuf_len(nbuf), 0);
1585*5113495bSYour Name 		/*
1586*5113495bSYour Name 		 * Update the protocol tag in SKB based on
1587*5113495bSYour Name 		 * CCE metadata
1588*5113495bSYour Name 		 */
1589*5113495bSYour Name 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1590*5113495bSYour Name 					  EXCEPTION_DEST_RING_ID,
1591*5113495bSYour Name 					  true, true);
1592*5113495bSYour Name 
1593*5113495bSYour Name 		/* Update the flow tag in SKB based on FSE metadata */
1594*5113495bSYour Name 		dp_rx_update_flow_tag(soc, vdev, nbuf,
1595*5113495bSYour Name 				      rx_tlv_hdr, true);
1596*5113495bSYour Name 
1597*5113495bSYour Name 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
1598*5113495bSYour Name 				 soc->hal_soc, rx_tlv_hdr) &&
1599*5113495bSYour Name 				 (vdev->rx_decap_type ==
1600*5113495bSYour Name 				  htt_cmn_pkt_type_ethernet))) {
1601*5113495bSYour Name 			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
1602*5113495bSYour Name 					    enh_flag, 0);
1603*5113495bSYour Name 
1604*5113495bSYour Name 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
1605*5113495bSYour Name 				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
1606*5113495bSYour Name 						    qdf_nbuf_len(nbuf),
1607*5113495bSYour Name 						    enh_flag, 0);
1608*5113495bSYour Name 		} else {
1609*5113495bSYour Name 			DP_PEER_UC_INCC_PKT(txrx_peer, 1,
1610*5113495bSYour Name 					    qdf_nbuf_len(nbuf),
1611*5113495bSYour Name 					    enh_flag,
1612*5113495bSYour Name 					    0);
1613*5113495bSYour Name 		}
1614*5113495bSYour Name 
1615*5113495bSYour Name 		qdf_nbuf_set_exc_frame(nbuf, 1);
1616*5113495bSYour Name 
1617*5113495bSYour Name 		if (qdf_unlikely(vdev->multipass_en)) {
1618*5113495bSYour Name 			if (dp_rx_multipass_process(txrx_peer, nbuf,
1619*5113495bSYour Name 						    tid) == false) {
1620*5113495bSYour Name 				DP_PEER_PER_PKT_STATS_INC
1621*5113495bSYour Name 					(txrx_peer,
1622*5113495bSYour Name 					 rx.multipass_rx_pkt_drop,
1623*5113495bSYour Name 					 1, link_id);
1624*5113495bSYour Name 				goto drop_nbuf;
1625*5113495bSYour Name 			}
1626*5113495bSYour Name 		}
1627*5113495bSYour Name 
1628*5113495bSYour Name 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1629*5113495bSYour Name 					    is_eapol);
1630*5113495bSYour Name 	}
1631*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1632*5113495bSYour Name 
1633*5113495bSYour Name drop_nbuf:
1634*5113495bSYour Name 	dp_rx_nbuf_free(nbuf);
1635*5113495bSYour Name 	return QDF_STATUS_E_FAILURE;
1636*5113495bSYour Name }
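/*
 * Illustrative sketch, not part of the driver: the length handling in
 * dp_rx_null_q_desc_handle_li() above computes the full packet length as
 * msdu_len + l3_hdr_pad + rx_pkt_tlv_size, then pulls the pre-header TLVs
 * (and the L3 pad, for non-frag buffers) off the front before delivering
 * the frame. The constant and helpers below are hypothetical.
 */
#if 0
#define SKETCH_RX_PKT_TLV_SIZE	384	/* hypothetical TLV header size */

static unsigned int sketch_pkt_len(unsigned int msdu_len,
				   unsigned int l3_hdr_pad)
{
	/* mirrors: pkt_len = msdu_len + l3_hdr_pad + soc->rx_pkt_tlv_size */
	return msdu_len + l3_hdr_pad + SKETCH_RX_PKT_TLV_SIZE;
}

static unsigned char *sketch_pull_to_payload(unsigned char *buf,
					     unsigned int l3_hdr_pad)
{
	/* mirrors: qdf_nbuf_pull_head(nbuf, l3_hdr_pad + rx_pkt_tlv_size) */
	return buf + SKETCH_RX_PKT_TLV_SIZE + l3_hdr_pad;
}
#endif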
1637