xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_rx.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
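/**
 * dp_rx_update_flow_info() - update the flow-idx-valid flag in the nbuf
 * @nbuf: RX packet buffer
 * @rx_tlv_hdr: start address of rx tlvs
 *
 * Skips marking the flow index valid when the flow-idx timeout bit is
 * set, or when the FSE metadata reports a valid SFE flow match (such
 * frames are delivered via the stack instead of the VP path).
 *
 * Return: void
 */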
static inline void
dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t fse_metadata;

	/* Set the flow idx valid flag only when there is no timeout */
	if (hal_rx_msdu_flow_idx_timeout_be(rx_tlv_hdr))
		return;

	/*
	 * If invalid bit is not set and the fse metadata indicates that it is
	 * a valid SFE flow match in FSE, do not set the rx flow tag and let it
	 * go via stack instead of VP.
	 */
	fse_metadata = hal_rx_msdu_fse_metadata_get_be(rx_tlv_hdr);
	if (!hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr) &&
	    (fse_metadata == DP_RX_FSE_FLOW_MATCH_SFE))
		return;

	qdf_nbuf_set_rx_flow_idx_valid(nbuf,
				       !hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr));
}
#else
static inline void
dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
}
#endif

#ifdef DP_RX_MSDU_DONE_FAIL_HISTORY
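/**
 * dp_rx_msdu_done_fail_event_record() - record an MSDU-done failure in
 *					 the msdu_done_fail history
 * @soc: DP soc
 * @rx_desc: rx_desc on which the failure was seen, or NULL if unknown
 * @nbuf: RX packet buffer on which the failure was seen
 *
 * Return: void
 */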
static inline void
dp_rx_msdu_done_fail_event_record(struct dp_soc *soc,
				  struct dp_rx_desc *rx_desc,
				  qdf_nbuf_t nbuf)
{
	struct dp_msdu_done_fail_entry *entry;
	uint32_t idx;

	if (qdf_unlikely(!soc->msdu_done_fail_hist))
		return;

	idx = dp_history_get_next_index(&soc->msdu_done_fail_hist->index,
					DP_MSDU_DONE_FAIL_HIST_MAX);
	entry = &soc->msdu_done_fail_hist->entry[idx];
	entry->paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);

	if (rx_desc)
		entry->sw_cookie = rx_desc->cookie;
	else
		entry->sw_cookie = 0xDEAD;
}
#else
static inline void
dp_rx_msdu_done_fail_event_record(struct dp_soc *soc,
				  struct dp_rx_desc *rx_desc,
				  qdf_nbuf_t nbuf)
{
}
#endif

#ifndef AST_OFFLOAD_ENABLE
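/**
 * dp_rx_wds_learn() - run WDS source-port learning on a received frame
 * @soc: DP soc
 * @vdev: vdev on which the frame was received
 * @rx_tlv_hdr: start address of rx tlvs
 * @txrx_peer: txrx peer that transmitted the frame
 * @nbuf: RX packet buffer
 *
 * Extracts the msdu metadata from the rx tlvs and, if WDS is enabled on
 * the vdev, performs WDS source-port learning for the frame.
 *
 * Return: void
 */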
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *txrx_peer,
		qdf_nbuf_t nbuf)
{
	struct hal_rx_msdu_metadata msdu_metadata;

	hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
	/* WDS Source Port Learning */
	if (qdf_likely(vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc,
					rx_tlv_hdr,
					txrx_peer,
					nbuf,
					msdu_metadata);
}
#else
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_peer_learn_be() - function to send event to control
 * path on receiving 1st 4-address frame from backhaul.
 * @soc: DP soc
 * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: RX packet buffer
 *
 * Return: void
 */
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
	struct dp_peer *ta_base_peer;

	/* Instead of checking whether addr4 is valid in the per-packet
	 * path, check the init bit, which is set on reception of the
	 * first addr4-valid packet.
	 */
	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&ta_txrx_peer->wds_ext.init))
		return;

	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    (qdf_nbuf_is_fr_ds_set(nbuf) && qdf_nbuf_is_to_ds_set(nbuf))) {
		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
					    &ta_txrx_peer->wds_ext.init);

		if (qdf_unlikely(ta_txrx_peer->nawds_enabled &&
				 ta_txrx_peer->is_mld_peer)) {
			ta_base_peer = dp_get_primary_link_peer_by_id(
							soc,
							ta_txrx_peer->peer_id,
							DP_MOD_ID_RX);
		} else {
			ta_base_peer = dp_peer_get_ref_by_id(
							soc,
							ta_txrx_peer->peer_id,
							DP_MOD_ID_RX);
		}

		if (!ta_base_peer)
			return;

		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);

		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
						soc->ctrl_psoc,
						ta_txrx_peer->peer_id,
						ta_txrx_peer->vdev->vdev_id,
						wds_ext_src_mac);
	}
}
#else
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
					    struct dp_txrx_peer *ta_txrx_peer,
					    uint8_t *rx_tlv_hdr,
					    qdf_nbuf_t nbuf)
{
}
#endif
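/**
 * dp_rx_wds_learn() - WDS learning when AST offload is enabled
 * @soc: DP soc
 * @vdev: vdev on which the frame was received
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_txrx_peer: WDS repeater txrx peer
 * @nbuf: RX packet buffer
 *
 * Only generates the WDS-extended first-frame learn event; per-packet
 * host source-port learning is skipped since AST handling is offloaded.
 *
 * Return: void
 */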
static void
dp_rx_wds_learn(struct dp_soc *soc,
		struct dp_vdev *vdev,
		uint8_t *rx_tlv_hdr,
		struct dp_txrx_peer *ta_txrx_peer,
		qdf_nbuf_t nbuf)
{
	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
}
#endif

#ifdef DP_RX_PEEK_MSDU_DONE_WAR
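/**
 * dp_rx_war_peek_msdu_done() - peek at the msdu_done bit in the rx tlvs
 * @soc: DP SoC handle
 * @rx_desc: rx_desc holding the nbuf to inspect
 *
 * Syncs the nbuf for CPU access and reads the msdu_done bit directly
 * from the rx tlvs.
 *
 * Return: 1 if msdu_done is set, 0 otherwise
 */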
static inline int dp_rx_war_peek_msdu_done(struct dp_soc *soc,
					   struct dp_rx_desc *rx_desc)
{
	uint8_t *rx_tlv_hdr;

	qdf_nbuf_sync_for_cpu(soc->osdev, rx_desc->nbuf, QDF_DMA_FROM_DEVICE);
	rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);

	return hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr);
}

/**
 * dp_rx_delink_n_rel_rx_desc() - unmap & free the nbuf in the rx_desc
 * @soc: DP SoC handle
 * @rx_desc: rx_desc handle of the nbuf to be unmapped & freed
 * @reo_ring_num: REO_RING_NUM corresponding to the REO for which the
 *		  bottom half is being serviced.
 *
 * Return: None
 */
static inline void
dp_rx_delink_n_rel_rx_desc(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
			   uint8_t reo_ring_num)
{
	if (!rx_desc)
		return;

	dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
	dp_rx_nbuf_free(rx_desc->nbuf);
	/*
	 * RX_DESC flags:
	 * in_use = 0 will be set when this rx_desc is added to local freelist
	 * unmapped = 1 will be set by dp_rx_nbuf_unmap
	 * in_err_state = 0 will be set during replenish
	 * has_reuse_nbuf need not be touched.
	 * msdu_done_fail = 0 should be set here ..!!
	 */
	rx_desc->msdu_done_fail = 0;
}

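/**
 * dp_rx_war_store_msdu_done_fail_desc() - park a failed rx_desc and
 *					   reclaim the oldest parked one
 * @soc: DP SoC handle
 * @rx_desc: rx_desc on which the msdu_done failure was seen
 * @reo_ring_num: REO_RING_NUM corresponding to the REO for which the
 *		  bottom half is being serviced.
 *
 * Parks @rx_desc in the next slot of the msdu_done_fail descriptor list
 * and releases (unmaps & frees) the descriptor previously parked there.
 *
 * Return: the evicted rx_desc, or NULL if the slot was empty
 */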
static inline struct dp_rx_desc *
dp_rx_war_store_msdu_done_fail_desc(struct dp_soc *soc,
				    struct dp_rx_desc *rx_desc,
				    uint8_t reo_ring_num)
{
	struct dp_rx_msdu_done_fail_desc_list *msdu_done_fail_desc_list =
						&soc->msdu_done_fail_desc_list;
	struct dp_rx_desc *old_rx_desc;
	uint32_t idx;

	idx = dp_get_next_index(&msdu_done_fail_desc_list->index,
				DP_MSDU_DONE_FAIL_DESCS_MAX);

	old_rx_desc = msdu_done_fail_desc_list->msdu_done_fail_descs[idx];
	dp_rx_delink_n_rel_rx_desc(soc, old_rx_desc, reo_ring_num);

	msdu_done_fail_desc_list->msdu_done_fail_descs[idx] = rx_desc;

	return old_rx_desc;
}

#else
static inline int dp_rx_war_peek_msdu_done(struct dp_soc *soc,
					   struct dp_rx_desc *rx_desc)
{
	return 1;
}

static inline struct dp_rx_desc *
dp_rx_war_store_msdu_done_fail_desc(struct dp_soc *soc,
				    struct dp_rx_desc *rx_desc,
				    uint8_t reo_ring_num)
{
	return NULL;
}
#endif

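/**
 * dp_rx_process_be() - BE-specific REO destination ring processing
 * @int_ctx: DP interrupt context serving this ring
 * @hal_ring_hdl: HAL handle of the REO destination ring
 * @reo_ring_num: REO destination ring number
 * @quota: maximum number of buffers to reap in this call
 *
 * Reaps RX buffers from the REO destination ring, replenishes the reaped
 * descriptors back to the RXDMA refill rings, then processes the nbuf
 * list and delivers it to the stack on a per-vdev basis.
 *
 * Return: number of RX buffers processed
 */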
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_ring_desc_t last_prefetched_hw_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc = NULL;
	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
	qdf_nbuf_t nbuf, next;
	bool near_full;
	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint32_t num_pending = 0;
	uint32_t rx_bufs_used = 0, rx_buf_cookie;
	uint16_t msdu_len = 0;
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev;
	uint32_t pkt_len = 0;
	enum hal_reo_error_status error;
	uint8_t *rx_tlv_hdr;
	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
	uint8_t mac_id = 0;
	struct dp_pdev *rx_pdev;
	uint8_t enh_flag;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_soc *soc = int_ctx->soc;
	struct cdp_tid_rx_stats *tid_stats;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	qdf_nbuf_t deliver_list_head;
	qdf_nbuf_t deliver_list_tail;
	uint32_t num_rx_bufs_reaped = 0;
	uint32_t intr_id;
	struct hif_opaque_softc *scn;
	int32_t tid = 0;
	bool is_prev_msdu_last = true;
	uint32_t num_entries_avail = 0;
	uint32_t rx_ol_pkt_cnt = 0;
	uint32_t num_entries = 0;
	QDF_STATUS status;
	qdf_nbuf_t ebuf_head;
	qdf_nbuf_t ebuf_tail;
	uint8_t pkt_capture_offload = 0;
	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
	int max_reap_limit, ring_near_full;
	struct dp_soc *replenish_soc;
	uint8_t chip_id;
	uint64_t current_time = 0;
	uint32_t old_tid;
	uint32_t peer_ext_stats;
	uint32_t dsf;
	uint32_t l3_pad;
	uint8_t link_id = 0;
	uint16_t buf_size;

	DP_HIST_INIT();

	qdf_assert_always(soc && hal_ring_hdl);
	hal_soc = soc->hal_soc;
	qdf_assert_always(hal_soc);

	scn = soc->hif_handle;
	intr_id = int_ctx->dp_intr_id;
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
	dp_runtime_pm_mark_last_busy(soc);
	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

more_data:
	/* reset local variables here to be re-used in the function */
	nbuf_head = NULL;
	nbuf_tail = NULL;
	deliver_list_head = NULL;
	deliver_list_tail = NULL;
	txrx_peer = NULL;
	vdev = NULL;
	num_rx_bufs_reaped = 0;
	ebuf_head = NULL;
	ebuf_tail = NULL;
	ring_near_full = 0;
	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
	qdf_mem_zero(head, sizeof(head));
	qdf_mem_zero(tail, sizeof(tail));
	old_tid = 0xff;
	dsf = 0;
	peer_ext_stats = 0;
	rx_pdev = NULL;
	tid_stats = NULL;

	dp_pkt_get_timestamp(&current_time);

	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
							    &max_reap_limit);

	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);

	if (!num_pending)
		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	if (num_pending > quota)
		num_pending = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
							    hal_ring_hdl,
							    num_pending);
	/*
	 * Start reaping the buffers from the reo ring and queue them
	 * in a per-vdev queue. Process the received pkts in a separate
	 * per-vdev loop.
	 */
	while (qdf_likely(num_pending)) {
		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc))
			break;

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
				  soc, hal_ring_hdl, error);
			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
				     1);
			/* Don't know how to deal with this -- assert */
			qdf_assert(0);
		}

		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		status = dp_rx_cookie_check_and_invalidate(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
			break;
		}

		rx_desc = (struct dp_rx_desc *)
				hal_rx_get_reo_desc_va(ring_desc);
		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
					   ring_desc, rx_desc);
		if (QDF_IS_STATUS_ERROR(status)) {
			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
				qdf_assert_always(!rx_desc->unmapped);
				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
							    rx_desc->pool_id);
				dp_rx_add_to_free_desc_list(
					&head[rx_desc->chip_id][rx_desc->pool_id],
					&tail[rx_desc->chip_id][rx_desc->pool_id],
					rx_desc);
			}
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case the host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */

		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			continue;
		}

		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Nbuf sanity check failure!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			rx_desc->in_err_state = 1;
			continue;
		}

		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		pkt_capture_offload =
			dp_rx_copy_desc_info_in_nbuf_cb(soc, ring_desc,
							rx_desc->nbuf,
							reo_ring_num);

		if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) {
			/* In dp_rx_sg_create(), the end bit should not be
			 * set until the last buffer. Since the continuation
			 * bit is set, this is not the last buffer.
			 */
			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);

			/* The previous msdu has the end bit set, so the
			 * current one starts a new MPDU.
			 */
			if (is_prev_msdu_last) {
				/* Get number of entries available in HW ring */
				num_entries_avail =
				hal_srng_dst_num_valid(hal_soc,
						       hal_ring_hdl, 1);

				/* For a new MPDU, check if we can read the
				 * complete MPDU by comparing the number of
				 * buffers available and the number of buffers
				 * needed to reap this MPDU.
				 */
				if ((QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) /
				     (buf_size -
				      soc->rx_pkt_tlv_size) + 1) >
				    num_pending) {
					DP_STATS_INC(soc,
						     rx.msdu_scatter_wait_break,
						     1);
					dp_rx_cookie_reset_invalid_bit(
								     ring_desc);
					/* As we are going to break out of the
					 * loop because of unavailability of
					 * descs to form a complete SG, we need
					 * to reset the TP in the REO
					 * destination ring.
					 */
					hal_srng_dst_dec_tp(hal_soc,
							    hal_ring_hdl);
					break;
				}
				is_prev_msdu_last = false;
			}
		} else if (qdf_unlikely(!dp_rx_war_peek_msdu_done(soc,
								  rx_desc))) {
			struct dp_rx_desc *old_rx_desc =
					dp_rx_war_store_msdu_done_fail_desc(
								soc, rx_desc,
								reo_ring_num);
			if (qdf_likely(old_rx_desc)) {
				rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
				dp_rx_add_to_free_desc_list
					(&head[rx_desc->chip_id][rx_desc->pool_id],
					 &tail[rx_desc->chip_id][rx_desc->pool_id],
					 old_rx_desc);
				quota -= 1;
				num_pending -= 1;
				num_rx_bufs_reaped++;
			}
			rx_desc->msdu_done_fail = 1;
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			dp_err("MSDU DONE failure %d",
			       soc->stats.rx.err.msdu_done_fail);
			dp_rx_msdu_done_fail_event_record(soc, rx_desc,
							  rx_desc->nbuf);
			continue;
		}

		if (!is_prev_msdu_last &&
		    !(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
			is_prev_msdu_last = true;

		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;

		/*
		 * Move the unmap after the scattered-msdu wait-break logic
		 * above, in case a double skb unmap happens.
		 */
		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
				   ebuf_tail, rx_desc);

		quota -= 1;
		num_pending -= 1;

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
		num_rx_bufs_reaped++;

		dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
					       num_pending,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		/*
		 * For the scatter case, allow the break only after the
		 * complete msdu has been received.
		 */
		if (is_prev_msdu_last &&
		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
						  max_reap_limit))
			break;
	}
done:
	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
	qdf_dsb();

	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * continue with next mac_id if no pkts were reaped
			 * from that pool
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc = dp_rx_replenish_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
					       dp_rxdma_srng,
					       rx_desc_pool,
					       rx_bufs_reaped[chip_id][mac_id],
					       &head[chip_id][mac_id],
					       &tail[chip_id][mac_id]);
		}
	}
	/* Peer can be NULL in case of LFR */
	if (qdf_likely(txrx_peer))
		vdev = NULL;

	/*
	 * BIG loop where each nbuf is dequeued from the global queue,
	 * processed and queued back on a per-vdev basis. These nbufs
	 * are sent to the stack as and when we run out of nbufs
	 * or a new nbuf dequeued from the global queue has a different
	 * vdev than the previous nbuf.
	 */
	nbuf = nbuf_head;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_prefetch_nbuf_data_be(nbuf, next);
		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
			nbuf = next;
			dp_verbose_debug("drop raw frame");
			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
			continue;
		}

		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
		peer_id = dp_rx_get_peer_id_be(nbuf);
		dp_rx_set_mpdu_seq_number_be(nbuf, rx_tlv_hdr);

		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
					peer_id, vdev_id)) {
			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
					       deliver_list_head,
					       deliver_list_tail);
			deliver_list_head = NULL;
			deliver_list_tail = NULL;
		}

		/* Get TID from struct cb->tid_val, save to tid */
		tid = qdf_nbuf_get_tid_val(nbuf);
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
			dp_verbose_debug("drop invalid tid");
			dp_rx_nbuf_free(nbuf);
			nbuf = next;
			continue;
		}

		if (qdf_unlikely(!txrx_peer)) {
			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				dp_verbose_debug("drop no peer frame");
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);

			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
								 peer_id,
								 &txrx_ref_handle,
								 pkt_capture_offload,
								 &vdev,
								 &rx_pdev, &dsf,
								 &old_tid);
			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
				dp_verbose_debug("drop by unmatch peer_id");
				nbuf = next;
				continue;
			}
			enh_flag = rx_pdev->enhanced_stats_en;
		}

		if (txrx_peer) {
			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
			qdf_dp_trace_set_track(nbuf, QDF_RX);
			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
				QDF_NBUF_RX_PKT_DATA_TRACK;
		}

		rx_bufs_used++;

		/* MLD Link Peer Statistics support */
		if (txrx_peer->is_mld_peer && rx_pdev->link_peer_stats) {
			link_id = dp_rx_get_stats_arr_idx_from_link_id(
								nbuf,
								txrx_peer);
		} else {
			link_id = 0;
		}

		dp_rx_set_nbuf_band(nbuf, txrx_peer, link_id);

		/* when hlos tid override is enabled, save tid in
		 * skb->priority
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification &
					DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
			qdf_nbuf_set_priority(nbuf, tid);

		DP_RX_TID_SAVE(nbuf, tid);
		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
		    dp_rx_pkt_tracepoints_enabled())
			qdf_nbuf_set_timestamp(nbuf);

		if (qdf_likely(old_tid != tid)) {
			tid_stats =
		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
			old_tid = tid;
		}

		/*
		 * Check if DMA completed -- msdu_done is the last bit
		 * to be written
		 */
		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
			dp_err("MSDU DONE failure %d",
			       soc->stats.rx.err.msdu_done_fail);
			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
					     QDF_TRACE_LEVEL_INFO);
			dp_rx_msdu_done_fail_event_record(soc, NULL, nbuf);
			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
			dp_rx_nbuf_free(nbuf);
			qdf_assert(0);
			nbuf = next;
			continue;
		}

		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
		/*
		 * First IF condition:
		 * 802.11 fragmented pkts are reinjected to the REO
		 * HW block as SG pkts, and for these pkts we only
		 * need to pull the RX TLVs header length.
		 * Second IF condition:
		 * The below condition happens when an MSDU is spread
		 * across multiple buffers. This can happen in two cases:
		 * 1. The nbuf size is smaller than the received msdu.
		 *    ex: we have set the nbuf size to 2048 during
		 *        nbuf_alloc. but we received an msdu which is
		 *        2304 bytes in size then this msdu is spread
		 *        across 2 nbufs.
		 *
		 * 2. AMSDUs when RAW mode is enabled.
		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
		 *        across 1st nbuf and 2nd nbuf and last MSDU is
		 *        spread across 2nd nbuf and 3rd nbuf.
		 *
		 * For these scenarios let us create a skb frag_list and
		 * append these buffers till the last MSDU of the AMSDU.
		 * Third condition:
		 * This is the most likely case, we receive 802.3 pkts
		 * decapsulated by HW, here we need to set the pkt length.
		 */
		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
			bool is_mcbc, is_sa_vld, is_da_vld;

			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								 rx_tlv_hdr);
			is_sa_vld =
				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);
			is_da_vld =
				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
								rx_tlv_hdr);

			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);

			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;

			if (qdf_nbuf_is_raw_frame(nbuf)) {
				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      rx.raw, 1,
							      msdu_len,
							      link_id);
			} else {
				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

				if (!dp_rx_is_sg_supported()) {
					dp_rx_nbuf_free(nbuf);
					dp_info_rl("sg msdu len %d, dropped",
						   msdu_len);
					nbuf = next;
					continue;
				}
			}
		} else {
			l3_pad = hal_rx_get_l3_pad_bytes_be(nbuf, rx_tlv_hdr);
			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
			pkt_len = msdu_len + l3_pad + soc->rx_pkt_tlv_size;

			qdf_nbuf_set_pktlen(nbuf, pkt_len);
			dp_rx_skip_tlvs(soc, nbuf, l3_pad);
		}

		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
			dp_rx_err("%pK: Policy Check Drop pkt", soc);
			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
						  rx.policy_check_drop,
						  1, link_id);
			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
			/* Drop & free packet */
			dp_rx_nbuf_free(nbuf);
			/* Statistics */
			nbuf = next;
			continue;
		}

		/*
		 * Drop non-EAPOL frames from unauthorized peer.
		 */
		if (qdf_likely(txrx_peer) &&
		    qdf_unlikely(!txrx_peer->authorize) &&
		    !qdf_nbuf_is_raw_frame(nbuf)) {
			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

			if (!is_eapol) {
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.peer_unauth_rx_pkt_drop,
							  1, link_id);
				dp_verbose_debug("drop by unauthorized peer");
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}
		}

		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
		dp_rx_update_flow_info(nbuf, rx_tlv_hdr);

		if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
			/*
			 * Process the frame for multipass processing.
			 */
			if (qdf_unlikely(vdev->multipass_en)) {
				if (dp_rx_multipass_process(txrx_peer, nbuf,
							    tid) == false) {
					DP_PEER_PER_PKT_STATS_INC
						(txrx_peer,
						 rx.multipass_rx_pkt_drop,
						 1, link_id);
					dp_verbose_debug("drop multi pass");
					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
			}
			if (qdf_unlikely(txrx_peer &&
					 (txrx_peer->nawds_enabled) &&
					 (qdf_nbuf_is_da_mcbc(nbuf)) &&
					 (hal_rx_get_mpdu_mac_ad4_valid_be
						(rx_tlv_hdr) == false))) {
				tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
							  rx.nawds_mcast_drop,
							  1, link_id);
				dp_verbose_debug("drop nawds");
				dp_rx_nbuf_free(nbuf);
				nbuf = next;
				continue;
			}

			/* Update the protocol tag in SKB based on CCE
			 * metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  reo_ring_num, false, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);

			if (qdf_unlikely(vdev->mesh_vdev)) {
				if (dp_rx_filter_mesh_packets(vdev, nbuf,
							      rx_tlv_hdr)
						== QDF_STATUS_SUCCESS) {
					dp_rx_info("%pK: mesh pkt filtered",
						   soc);
					tid_stats->fail_cnt[MESH_FILTER_DROP]++;
					DP_STATS_INC(vdev->pdev,
						     dropped.mesh_filter, 1);

					dp_rx_nbuf_free(nbuf);
					nbuf = next;
					continue;
				}
				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
						      txrx_peer);
			}
		}

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			dp_rx_wds_learn(soc, vdev,
					rx_tlv_hdr,
					txrx_peer,
					nbuf);
		}

		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
					reo_ring_num, tid_stats, link_id);

		if (qdf_likely(vdev->rx_decap_type ==
			       htt_cmn_pkt_type_ethernet) &&
		    qdf_likely(!vdev->mesh_vdev)) {
			/* Intrabss-fwd */
			if (dp_rx_check_ap_bridge(vdev))
				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
							  rx_tlv_hdr,
							  nbuf,
							  link_id)) {
					nbuf = next;
					tid_stats->intrabss_cnt++;
					continue; /* Get next desc */
				}
		}

		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
							 nbuf);

		dp_rx_update_stats(soc, nbuf);

		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
				     current_time, nbuf);

		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);

		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
					  enh_flag);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
					      rx.rx_success, 1,
					      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
					      link_id);

		if (qdf_unlikely(txrx_peer->in_twt))
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.to_stack_twt, 1,
						      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
						      link_id);

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}

	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
			       pkt_capture_offload,
			       deliver_list_head,
			       deliver_list_tail);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios:
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near the high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full && quota)
		goto more_data;

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdevs */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}
1048*5113495bSYour Name 
1049*5113495bSYour Name #ifdef RX_DESC_MULTI_PAGE_ALLOC
1050*5113495bSYour Name /**
1051*5113495bSYour Name  * dp_rx_desc_pool_init_be_cc() - initial RX desc pool for cookie conversion
1052*5113495bSYour Name  * @soc: Handle to DP Soc structure
1053*5113495bSYour Name  * @rx_desc_pool: Rx descriptor pool handler
1054*5113495bSYour Name  * @pool_id: Rx descriptor pool ID
1055*5113495bSYour Name  *
1056*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
1057*5113495bSYour Name  */
1058*5113495bSYour Name static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool,uint32_t pool_id)1059*5113495bSYour Name dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
1060*5113495bSYour Name 			   struct rx_desc_pool *rx_desc_pool,
1061*5113495bSYour Name 			   uint32_t pool_id)
1062*5113495bSYour Name {
1063*5113495bSYour Name 	struct dp_hw_cookie_conversion_t *cc_ctx;
1064*5113495bSYour Name 	struct dp_soc_be *be_soc;
1065*5113495bSYour Name 	union dp_rx_desc_list_elem_t *rx_desc_elem;
1066*5113495bSYour Name 	struct dp_spt_page_desc *page_desc;
1067*5113495bSYour Name 	uint32_t ppt_idx = 0;
1068*5113495bSYour Name 	uint32_t avail_entry_index = 0;
1069*5113495bSYour Name 
1070*5113495bSYour Name 	if (!rx_desc_pool->pool_size) {
1071*5113495bSYour Name 		dp_err("desc_num 0 !!");
1072*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1073*5113495bSYour Name 	}
1074*5113495bSYour Name 
1075*5113495bSYour Name 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1076*5113495bSYour Name 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1077*5113495bSYour Name 
1078*5113495bSYour Name 	page_desc = &cc_ctx->page_desc_base[0];
1079*5113495bSYour Name 	rx_desc_elem = rx_desc_pool->freelist;
1080*5113495bSYour Name 	while (rx_desc_elem) {
1081*5113495bSYour Name 		if (avail_entry_index == 0) {
1082*5113495bSYour Name 			if (ppt_idx >= cc_ctx->total_page_num) {
1083*5113495bSYour Name 				dp_alert("insufficient secondary page tables");
1084*5113495bSYour Name 				qdf_assert_always(0);
1085*5113495bSYour Name 			}
1086*5113495bSYour Name 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1087*5113495bSYour Name 		}
1088*5113495bSYour Name 
1089*5113495bSYour Name 		/* put each RX desc VA into the SPT pages and
1090*5113495bSYour Name 		 * get the corresponding ID
1091*5113495bSYour Name 		 */
1092*5113495bSYour Name 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1093*5113495bSYour Name 					 avail_entry_index,
1094*5113495bSYour Name 					 &rx_desc_elem->rx_desc);
1095*5113495bSYour Name 		rx_desc_elem->rx_desc.cookie =
1096*5113495bSYour Name 			dp_cc_desc_id_generate(page_desc->ppt_index,
1097*5113495bSYour Name 					       avail_entry_index);
1098*5113495bSYour Name 		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
1099*5113495bSYour Name 		rx_desc_elem->rx_desc.pool_id = pool_id;
1100*5113495bSYour Name 		rx_desc_elem->rx_desc.in_use = 0;
1101*5113495bSYour Name 		rx_desc_elem = rx_desc_elem->next;
1102*5113495bSYour Name 
1103*5113495bSYour Name 		avail_entry_index = (avail_entry_index + 1) &
1104*5113495bSYour Name 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1105*5113495bSYour Name 	}
1106*5113495bSYour Name 
1107*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1108*5113495bSYour Name }
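/*
 * A minimal sketch of the cookie layout assumed by the loop above: each SPT
 * page holds a fixed number of descriptor VAs, the page-local entry index
 * wraps via DP_CC_SPT_PAGE_MAX_ENTRIES_MASK, and dp_cc_desc_id_generate() is
 * assumed to pack the secondary page table index in the upper bits. The
 * helper and the shift name below are hypothetical, for illustration only:
 *
 *	static inline void dp_cc_cookie_decode(uint32_t cookie,
 *					       uint32_t *ppt_idx,
 *					       uint32_t *entry_idx)
 *	{
 *		*entry_idx = cookie & DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
 *		*ppt_idx = cookie >> DP_CC_SPT_PAGE_MAX_ENTRIES_SHIFT;
 *	}
 */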
1109*5113495bSYour Name #else
1110*5113495bSYour Name static QDF_STATUS
1111*5113495bSYour Name dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
1112*5113495bSYour Name 			   struct rx_desc_pool *rx_desc_pool,
1113*5113495bSYour Name 			   uint32_t pool_id)
1114*5113495bSYour Name {
1115*5113495bSYour Name 	struct dp_hw_cookie_conversion_t *cc_ctx;
1116*5113495bSYour Name 	struct dp_soc_be *be_soc;
1117*5113495bSYour Name 	struct dp_spt_page_desc *page_desc;
1118*5113495bSYour Name 	uint32_t ppt_idx = 0;
1119*5113495bSYour Name 	uint32_t avail_entry_index = 0;
1120*5113495bSYour Name 	int i = 0;
1121*5113495bSYour Name 
1122*5113495bSYour Name 	if (!rx_desc_pool->pool_size) {
1123*5113495bSYour Name 		dp_err("desc_num 0 !!");
1124*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1125*5113495bSYour Name 	}
1126*5113495bSYour Name 
1127*5113495bSYour Name 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1128*5113495bSYour Name 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1129*5113495bSYour Name 
1130*5113495bSYour Name 	page_desc = &cc_ctx->page_desc_base[0];
1131*5113495bSYour Name 	for (i = 0; i < rx_desc_pool->pool_size; i++) {
1132*5113495bSYour Name 		if (i == rx_desc_pool->pool_size - 1)
1133*5113495bSYour Name 			rx_desc_pool->array[i].next = NULL;
1134*5113495bSYour Name 		else
1135*5113495bSYour Name 			rx_desc_pool->array[i].next =
1136*5113495bSYour Name 				&rx_desc_pool->array[i + 1];
1137*5113495bSYour Name 
1138*5113495bSYour Name 		if (avail_entry_index == 0) {
1139*5113495bSYour Name 			if (ppt_idx >= cc_ctx->total_page_num) {
1140*5113495bSYour Name 				dp_alert("insufficient secondary page tables");
1141*5113495bSYour Name 				qdf_assert_always(0);
1142*5113495bSYour Name 			}
1143*5113495bSYour Name 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1144*5113495bSYour Name 		}
1145*5113495bSYour Name 
1146*5113495bSYour Name 		/* put each RX desc VA into the SPT pages and
1147*5113495bSYour Name 		 * get the corresponding ID
1148*5113495bSYour Name 		 */
1149*5113495bSYour Name 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1150*5113495bSYour Name 					 avail_entry_index,
1151*5113495bSYour Name 					 &rx_desc_pool->array[i].rx_desc);
1152*5113495bSYour Name 		rx_desc_pool->array[i].rx_desc.cookie =
1153*5113495bSYour Name 			dp_cc_desc_id_generate(page_desc->ppt_index,
1154*5113495bSYour Name 					       avail_entry_index);
1155*5113495bSYour Name 		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
1156*5113495bSYour Name 		rx_desc_pool->array[i].rx_desc.in_use = 0;
1157*5113495bSYour Name 		rx_desc_pool->array[i].rx_desc.chip_id =
1158*5113495bSYour Name 					dp_mlo_get_chip_id(soc);
1159*5113495bSYour Name 
1160*5113495bSYour Name 		avail_entry_index = (avail_entry_index + 1) &
1161*5113495bSYour Name 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1162*5113495bSYour Name 	}
1163*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1164*5113495bSYour Name }
1165*5113495bSYour Name #endif
1166*5113495bSYour Name 
1167*5113495bSYour Name static void
1168*5113495bSYour Name dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
1169*5113495bSYour Name 			     struct rx_desc_pool *rx_desc_pool,
1170*5113495bSYour Name 			     uint32_t pool_id)
1171*5113495bSYour Name {
1172*5113495bSYour Name 	struct dp_spt_page_desc *page_desc;
1173*5113495bSYour Name 	struct dp_soc_be *be_soc;
1174*5113495bSYour Name 	int i = 0;
1175*5113495bSYour Name 	struct dp_hw_cookie_conversion_t *cc_ctx;
1176*5113495bSYour Name 
1177*5113495bSYour Name 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1178*5113495bSYour Name 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1179*5113495bSYour Name 
1180*5113495bSYour Name 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1181*5113495bSYour Name 		page_desc = &cc_ctx->page_desc_base[i];
1182*5113495bSYour Name 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1183*5113495bSYour Name 	}
1184*5113495bSYour Name }
1185*5113495bSYour Name 
1186*5113495bSYour Name QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
1187*5113495bSYour Name 				   struct rx_desc_pool *rx_desc_pool,
1188*5113495bSYour Name 				   uint32_t pool_id)
1189*5113495bSYour Name {
1190*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1191*5113495bSYour Name 
1192*5113495bSYour Name 	/* Only the regular RX buffer desc pool uses HW cookie conversion */
1193*5113495bSYour Name 	if (rx_desc_pool->desc_type == QDF_DP_RX_DESC_BUF_TYPE) {
1194*5113495bSYour Name 		dp_info("rx_desc_buf pool init");
1195*5113495bSYour Name 		status = dp_rx_desc_pool_init_be_cc(soc,
1196*5113495bSYour Name 						    rx_desc_pool,
1197*5113495bSYour Name 						    pool_id);
1198*5113495bSYour Name 	} else {
1199*5113495bSYour Name 		dp_info("non_rx_desc_buf_pool init");
1200*5113495bSYour Name 		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
1201*5113495bSYour Name 						      pool_id);
1202*5113495bSYour Name 	}
1203*5113495bSYour Name 
1204*5113495bSYour Name 	return status;
1205*5113495bSYour Name }
1206*5113495bSYour Name 
1207*5113495bSYour Name void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
1208*5113495bSYour Name 			       struct rx_desc_pool *rx_desc_pool,
1209*5113495bSYour Name 			       uint32_t pool_id)
1210*5113495bSYour Name {
1211*5113495bSYour Name 	if (rx_desc_pool->desc_type == QDF_DP_RX_DESC_BUF_TYPE)
1212*5113495bSYour Name 		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
1213*5113495bSYour Name }
1214*5113495bSYour Name 
1215*5113495bSYour Name #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
1216*5113495bSYour Name #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
1217*5113495bSYour Name QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1218*5113495bSYour Name 					       void *ring_desc,
1219*5113495bSYour Name 					       struct dp_rx_desc **r_rx_desc)
1220*5113495bSYour Name {
1221*5113495bSYour Name 	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
1222*5113495bSYour Name 		/* HW cookie conversion done */
1223*5113495bSYour Name 		*r_rx_desc = (struct dp_rx_desc *)
1224*5113495bSYour Name 				hal_rx_wbm_get_desc_va(ring_desc);
1225*5113495bSYour Name 	} else {
1226*5113495bSYour Name 		/* SW does the cookie conversion */
1227*5113495bSYour Name 		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1228*5113495bSYour Name 
1229*5113495bSYour Name 		*r_rx_desc = (struct dp_rx_desc *)
1230*5113495bSYour Name 				dp_cc_desc_find(soc, cookie);
1231*5113495bSYour Name 	}
1232*5113495bSYour Name 
1233*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1234*5113495bSYour Name }
1235*5113495bSYour Name #else
1236*5113495bSYour Name QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1237*5113495bSYour Name 					       void *ring_desc,
1238*5113495bSYour Name 					       struct dp_rx_desc **r_rx_desc)
1239*5113495bSYour Name {
1240*5113495bSYour Name 	 *r_rx_desc = (struct dp_rx_desc *)
1241*5113495bSYour Name 			hal_rx_wbm_get_desc_va(ring_desc);
1242*5113495bSYour Name 
1243*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1244*5113495bSYour Name }
1245*5113495bSYour Name #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
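/*
 * Note on the two variants above: when the HW has already performed the
 * cookie conversion, the WBM ring descriptor carries the SW descriptor VA
 * and hal_rx_wbm_get_desc_va() returns it directly; otherwise the cookie is
 * read with HAL_RX_BUF_COOKIE_GET() and resolved through the SW
 * cookie-conversion table via dp_cc_desc_find().
 */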
1246*5113495bSYour Name struct dp_rx_desc *dp_rx_desc_ppeds_cookie_2_va(struct dp_soc *soc,
1247*5113495bSYour Name 						unsigned long cookie)
1248*5113495bSYour Name {
1249*5113495bSYour Name 	return (struct dp_rx_desc *)cookie;
1250*5113495bSYour Name }
1251*5113495bSYour Name 
1252*5113495bSYour Name #else
1253*5113495bSYour Name struct dp_rx_desc *dp_rx_desc_ppeds_cookie_2_va(struct dp_soc *soc,
1254*5113495bSYour Name 						unsigned long cookie)
1255*5113495bSYour Name {
1256*5113495bSYour Name 	if (!cookie)
1257*5113495bSYour Name 		return NULL;
1258*5113495bSYour Name 
1259*5113495bSYour Name 	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
1260*5113495bSYour Name }
1261*5113495bSYour Name 
1262*5113495bSYour Name QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1263*5113495bSYour Name 					       void *ring_desc,
1264*5113495bSYour Name 					       struct dp_rx_desc **r_rx_desc)
1265*5113495bSYour Name {
1266*5113495bSYour Name 	/* SW does the cookie conversion */
1267*5113495bSYour Name 	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1268*5113495bSYour Name 
1269*5113495bSYour Name 	*r_rx_desc = (struct dp_rx_desc *)
1270*5113495bSYour Name 			dp_cc_desc_find(soc, cookie);
1271*5113495bSYour Name 
1272*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1273*5113495bSYour Name }
1274*5113495bSYour Name #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
1275*5113495bSYour Name 
1276*5113495bSYour Name struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
1277*5113495bSYour Name 					     uint32_t cookie)
1278*5113495bSYour Name {
1279*5113495bSYour Name 	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
1280*5113495bSYour Name }
1281*5113495bSYour Name 
1282*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO)
1283*5113495bSYour Name #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
1284*5113495bSYour Name #define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
1285*5113495bSYour Name #define DP_RANDOM_MAC_OFFSET	1
1286*5113495bSYour Name #define DP_MAC_LOCAL_ADMBIT_MASK	0x2
1287*5113495bSYour Name #define DP_MAC_LOCAL_ADMBIT_OFFSET	0
1288*5113495bSYour Name static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
1289*5113495bSYour Name 				       qdf_nbuf_t nbuf)
1290*5113495bSYour Name {
1291*5113495bSYour Name 	qdf_ether_header_t *eh =
1292*5113495bSYour Name 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1293*5113495bSYour Name 
1294*5113495bSYour Name 	eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] =
1295*5113495bSYour Name 				eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] |
1296*5113495bSYour Name 				DP_MAC_LOCAL_ADMBIT_MASK;
1297*5113495bSYour Name }
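/*
 * Illustrative example of the SA mangling above (the address is made up): a
 * received source MAC of 00:aa:bb:cc:dd:ee becomes 02:aa:bb:cc:dd:ee once
 * the locally administered bit is ORed in:
 *
 *	uint8_t sa[] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee};
 *
 *	sa[DP_MAC_LOCAL_ADMBIT_OFFSET] |= DP_MAC_LOCAL_ADMBIT_MASK;
 *	// sa[0] is now 0x02, i.e. 02:aa:bb:cc:dd:ee
 */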
1298*5113495bSYour Name 
1299*5113495bSYour Name #ifdef QCA_SUPPORT_WDS_EXTENDED
1300*5113495bSYour Name static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1301*5113495bSYour Name {
1302*5113495bSYour Name 	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
1303*5113495bSYour Name }
1304*5113495bSYour Name #else
1305*5113495bSYour Name static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1306*5113495bSYour Name {
1307*5113495bSYour Name 	return false;
1308*5113495bSYour Name }
1309*5113495bSYour Name #endif
1310*5113495bSYour Name 
1311*5113495bSYour Name #ifdef EXT_HYBRID_MLO_MODE
1312*5113495bSYour Name static inline
1313*5113495bSYour Name bool dp_rx_check_ext_hybrid_mode(struct dp_soc *soc, struct dp_vdev *vdev)
1314*5113495bSYour Name {
1315*5113495bSYour Name 	return ((DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap) &&
1316*5113495bSYour Name 		(wlan_op_mode_ap == vdev->opmode));
1317*5113495bSYour Name }
1318*5113495bSYour Name #else
1319*5113495bSYour Name static inline
1320*5113495bSYour Name bool dp_rx_check_ext_hybrid_mode(struct dp_soc *soc, struct dp_vdev *vdev)
1321*5113495bSYour Name {
1322*5113495bSYour Name 	return false;
1323*5113495bSYour Name }
1324*5113495bSYour Name #endif
1325*5113495bSYour Name 
1326*5113495bSYour Name bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1327*5113495bSYour Name 			    struct dp_vdev *vdev,
1328*5113495bSYour Name 			    struct dp_txrx_peer *peer,
1329*5113495bSYour Name 			    qdf_nbuf_t nbuf,
1330*5113495bSYour Name 			    uint8_t link_id)
1331*5113495bSYour Name {
1332*5113495bSYour Name 	qdf_nbuf_t nbuf_copy;
1333*5113495bSYour Name 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1334*5113495bSYour Name 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1335*5113495bSYour Name 	struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
1336*5113495bSYour Name 					tid_stats.tid_rx_wbm_stats[0][tid];
1337*5113495bSYour Name 
1338*5113495bSYour Name 	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
1339*5113495bSYour Name 	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
1340*5113495bSYour Name 		return false;
1341*5113495bSYour Name 
1342*5113495bSYour Name 	if (qdf_unlikely(vdev->multipass_en)) {
1343*5113495bSYour Name 		if (!dp_rx_multipass_process(peer, nbuf, tid)) {
1344*5113495bSYour Name 			DP_PEER_PER_PKT_STATS_INC(peer,
1345*5113495bSYour Name 						  rx.multipass_rx_pkt_drop,
1346*5113495bSYour Name 						  1, link_id);
1347*5113495bSYour Name 			return false;
1348*5113495bSYour Name 		}
1349*5113495bSYour Name 	}
1350*5113495bSYour Name 
1351*5113495bSYour Name 	if (!peer->bss_peer) {
1352*5113495bSYour Name 		if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf,
1353*5113495bSYour Name 					    tid_stats, link_id))
1354*5113495bSYour Name 			dp_rx_err("forwarding failed");
1355*5113495bSYour Name 	}
1356*5113495bSYour Name 
1357*5113495bSYour Name 	qdf_nbuf_set_next(nbuf, NULL);
1358*5113495bSYour Name 
1359*5113495bSYour Name 	/* REO sends IGMP to the driver only if the AP is operating in
1360*5113495bSYour Name 	 * hybrid MLD mode.
1361*5113495bSYour Name 	 */
1362*5113495bSYour Name 
1363*5113495bSYour Name 	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer))) {
1364*5113495bSYour Name 		/* send the IGMP to the netdev corresponding to the
1365*5113495bSYour Name 		 * interface it was received on
1366*5113495bSYour Name 		 */
1367*5113495bSYour Name 		goto send_pkt;
1368*5113495bSYour Name 	}
1369*5113495bSYour Name 
1370*5113495bSYour Name 	if (dp_rx_check_ext_hybrid_mode(soc, vdev)) {
1371*5113495bSYour Name 		/* send the IGMP to the netdev corresponding to the
1372*5113495bSYour Name 		 * interface it was received on
1373*5113495bSYour Name 		 */
1374*5113495bSYour Name 		goto send_pkt;
1375*5113495bSYour Name 	}
1376*5113495bSYour Name 
1377*5113495bSYour Name 	/*
1378*5113495bSYour Name 	 * In the case of ME5/ME6, backhaul WDS for an MLD peer, NAWDS,
1379*5113495bSYour Name 	 * a legacy non-MLO AP vdev or a non-AP vdev (which is very
1380*5113495bSYour Name 	 * unlikely), send the IGMP pkt on the same link where it was
1381*5113495bSYour Name 	 * received, as these features use peer-based TCL metadata.
1382*5113495bSYour Name 	 */
1383*5113495bSYour Name 	if (vdev->mcast_enhancement_en ||
1384*5113495bSYour Name 	    peer->is_mld_peer ||
1385*5113495bSYour Name 	    peer->nawds_enabled ||
1386*5113495bSYour Name 	    !vdev->mlo_vdev ||
1387*5113495bSYour Name 	    qdf_unlikely(wlan_op_mode_ap != vdev->opmode)) {
1388*5113495bSYour Name 		/* send the IGMP to the netdev corresponding to the
1389*5113495bSYour Name 		 * interface it was received on
1390*5113495bSYour Name 		 */
1391*5113495bSYour Name 		goto send_pkt;
1392*5113495bSYour Name 	}
1393*5113495bSYour Name 
1394*5113495bSYour Name 	/* If we are here, a legacy non-WDS STA is connected to a
1395*5113495bSYour Name 	 * hybrid MLD AP, so send a clone of the IGMP packet on the
1396*5113495bSYour Name 	 * interface where it was received.
1397*5113495bSYour Name 	 */
1398*5113495bSYour Name 	nbuf_copy = qdf_nbuf_copy(nbuf);
1399*5113495bSYour Name 	if (qdf_likely(nbuf_copy))
1400*5113495bSYour Name 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy, NULL);
1401*5113495bSYour Name 
1402*5113495bSYour Name 	dp_rx_dummy_src_mac(vdev, nbuf);
1403*5113495bSYour Name 	/* Set the ML peer valid bit in the skb peer metadata, so that
1404*5113495bSYour Name 	 * OSIF can deliver the SA-mangled IGMP packet to the MLD netdev.
1405*5113495bSYour Name 	 */
1406*5113495bSYour Name 	QDF_NBUF_CB_RX_PEER_ID(nbuf) |= CDP_RX_ML_PEER_VALID_MASK;
1407*5113495bSYour Name 	/* Deliver the original IGMP, with the dummy src, on the MLD netdev */
1408*5113495bSYour Name send_pkt:
1409*5113495bSYour Name 	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
1410*5113495bSYour Name 			       &be_vdev->vdev,
1411*5113495bSYour Name 			       peer,
1412*5113495bSYour Name 			       nbuf,
1413*5113495bSYour Name 			       NULL);
1414*5113495bSYour Name 	return true;
1415*5113495bSYour Name }
1416*5113495bSYour Name #else
1417*5113495bSYour Name bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1418*5113495bSYour Name 			    struct dp_vdev *vdev,
1419*5113495bSYour Name 			    struct dp_txrx_peer *peer,
1420*5113495bSYour Name 			    qdf_nbuf_t nbuf,
1421*5113495bSYour Name 			    uint8_t link_id)
1422*5113495bSYour Name {
1423*5113495bSYour Name 	return false;
1424*5113495bSYour Name }
1425*5113495bSYour Name #endif
1426*5113495bSYour Name #endif
1427*5113495bSYour Name 
1428*5113495bSYour Name #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1429*5113495bSYour Name uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
1430*5113495bSYour Name 			  hal_ring_handle_t hal_ring_hdl,
1431*5113495bSYour Name 			  uint8_t reo_ring_num,
1432*5113495bSYour Name 			  uint32_t quota)
1433*5113495bSYour Name {
1434*5113495bSYour Name 	struct dp_soc *soc = int_ctx->soc;
1435*5113495bSYour Name 	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
1436*5113495bSYour Name 	uint32_t work_done = 0;
1437*5113495bSYour Name 
1438*5113495bSYour Name 	if (dp_srng_get_near_full_level(soc, rx_ring) <
1439*5113495bSYour Name 			DP_SRNG_THRESH_NEAR_FULL)
1440*5113495bSYour Name 		return 0;
1441*5113495bSYour Name 
1442*5113495bSYour Name 	qdf_atomic_set(&rx_ring->near_full, 1);
1443*5113495bSYour Name 	work_done++;
1444*5113495bSYour Name 
1445*5113495bSYour Name 	return work_done;
1446*5113495bSYour Name }
1447*5113495bSYour Name #endif
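/*
 * A hedged usage sketch for dp_rx_nf_process(); the actual caller lives
 * outside this file, so the follow-up call below is illustrative only. The
 * near-full interrupt context marks the ring and then runs the regular reap
 * loop to drain it:
 *
 *	if (dp_rx_nf_process(int_ctx, hal_ring_hdl, reo_ring_num, quota))
 *		work_done = dp_rx_process(int_ctx, hal_ring_hdl,
 *					  reo_ring_num, quota);
 */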
1448*5113495bSYour Name 
1449*5113495bSYour Name #ifndef QCA_HOST_MODE_WIFI_DISABLED
1450*5113495bSYour Name #ifdef WLAN_FEATURE_11BE_MLO
1451*5113495bSYour Name /**
1452*5113495bSYour Name  * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
1453*5113495bSYour Name  * @ta_peer: transmitter peer handle
1454*5113495bSYour Name  * @da_peer: destination peer handle
1455*5113495bSYour Name  *
1456*5113495bSYour Name  * Return: true - MLO forwarding case, false: not
1457*5113495bSYour Name  */
1458*5113495bSYour Name static inline bool
1459*5113495bSYour Name dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1460*5113495bSYour Name 			     struct dp_txrx_peer *da_peer)
1461*5113495bSYour Name {
1462*5113495bSYour Name 	/* TA peer and DA peer's vdev should be partner MLO vdevs */
1463*5113495bSYour Name 	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
1464*5113495bSYour Name 				      &da_peer->vdev->mld_mac_addr))
1465*5113495bSYour Name 		return false;
1466*5113495bSYour Name 
1467*5113495bSYour Name 	return true;
1468*5113495bSYour Name }
1469*5113495bSYour Name #else
1470*5113495bSYour Name static inline bool
1471*5113495bSYour Name dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1472*5113495bSYour Name 			     struct dp_txrx_peer *da_peer)
1473*5113495bSYour Name {
1474*5113495bSYour Name 	return false;
1475*5113495bSYour Name }
1476*5113495bSYour Name #endif
1477*5113495bSYour Name 
1478*5113495bSYour Name #ifdef INTRA_BSS_FWD_OFFLOAD
1479*5113495bSYour Name /**
1480*5113495bSYour Name  * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
1481*5113495bSYour Name  *				     for unicast frame
1482*5113495bSYour Name  * @nbuf: RX packet buffer
1483*5113495bSYour Name  * @ta_peer: transmitter DP peer handle
1484*5113495bSYour Name  * @rx_tlv_hdr: Rx TLV header
1485*5113495bSYour Name  * @msdu_metadata: MSDU meta data info
1486*5113495bSYour Name  * @params: params to be filled in
1487*5113495bSYour Name  *
1488*5113495bSYour Name  * Return: true - intrabss allowed
1489*5113495bSYour Name  *	   false - not allowed
1490*5113495bSYour Name  */
1491*5113495bSYour Name static bool
1492*5113495bSYour Name dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1493*5113495bSYour Name 			      struct dp_txrx_peer *ta_peer,
1494*5113495bSYour Name 			      uint8_t *rx_tlv_hdr,
1495*5113495bSYour Name 			      struct hal_rx_msdu_metadata *msdu_metadata,
1496*5113495bSYour Name 			      struct dp_be_intrabss_params *params)
1497*5113495bSYour Name {
1498*5113495bSYour Name 	uint8_t dest_chip_id, dest_chip_pmac_id;
1499*5113495bSYour Name 	struct dp_vdev_be *be_vdev =
1500*5113495bSYour Name 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
1501*5113495bSYour Name 	struct dp_soc_be *be_soc =
1502*5113495bSYour Name 		dp_get_be_soc_from_dp_soc(params->dest_soc);
1503*5113495bSYour Name 	uint16_t da_peer_id;
1504*5113495bSYour Name 	struct dp_peer *da_peer = NULL;
1505*5113495bSYour Name 
1506*5113495bSYour Name 	if (!qdf_nbuf_is_intra_bss(nbuf))
1507*5113495bSYour Name 		return false;
1508*5113495bSYour Name 
1509*5113495bSYour Name 	hal_rx_tlv_get_dest_chip_pmac_id(rx_tlv_hdr,
1510*5113495bSYour Name 					 &dest_chip_id,
1511*5113495bSYour Name 					 &dest_chip_pmac_id);
1512*5113495bSYour Name 
1513*5113495bSYour Name 	if (dp_assert_always_internal_stat(
1514*5113495bSYour Name 				(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1)),
1515*5113495bSYour Name 				&be_soc->soc, rx.err.intra_bss_bad_chipid))
1516*5113495bSYour Name 		return false;
1517*5113495bSYour Name 
1518*5113495bSYour Name 	params->dest_soc =
1519*5113495bSYour Name 		dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
1520*5113495bSYour Name 					      dest_chip_id);
1521*5113495bSYour Name 	if (!params->dest_soc)
1522*5113495bSYour Name 		return false;
1523*5113495bSYour Name 
1524*5113495bSYour Name 	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
1525*5113495bSYour Name 
1526*5113495bSYour Name 	da_peer = dp_peer_get_tgt_peer_by_id(params->dest_soc, da_peer_id,
1527*5113495bSYour Name 					     DP_MOD_ID_RX);
1528*5113495bSYour Name 	if (da_peer) {
1529*5113495bSYour Name 		if (da_peer->bss_peer || (da_peer->txrx_peer == ta_peer)) {
1530*5113495bSYour Name 			dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
1531*5113495bSYour Name 			return false;
1532*5113495bSYour Name 		}
1533*5113495bSYour Name 		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
1534*5113495bSYour Name 	}
1535*5113495bSYour Name 
1536*5113495bSYour Name 	if (!be_vdev->mlo_dev_ctxt) {
1537*5113495bSYour Name 		params->tx_vdev_id = ta_peer->vdev->vdev_id;
1538*5113495bSYour Name 		return true;
1539*5113495bSYour Name 	}
1540*5113495bSYour Name 
1541*5113495bSYour Name 	if (dest_chip_id == be_soc->mlo_chip_id) {
1542*5113495bSYour Name 		if (dest_chip_pmac_id == ta_peer->vdev->pdev->pdev_id)
1543*5113495bSYour Name 			params->tx_vdev_id = ta_peer->vdev->vdev_id;
1544*5113495bSYour Name 		else
1545*5113495bSYour Name 			params->tx_vdev_id =
1546*5113495bSYour Name 				be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id]
1547*5113495bSYour Name 							  [dest_chip_pmac_id];
1548*5113495bSYour Name 		return true;
1549*5113495bSYour Name 	}
1550*5113495bSYour Name 
1551*5113495bSYour Name 	params->tx_vdev_id =
1552*5113495bSYour Name 		be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id]
1553*5113495bSYour Name 						[dest_chip_pmac_id];
1554*5113495bSYour Name 
1555*5113495bSYour Name 	return true;
1556*5113495bSYour Name }
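/*
 * The lookups above assume mlo_dev_ctxt->vdev_list is a per-chip, per-pmac
 * table of partner vdev IDs: a frame whose RX TLV resolves to
 * (chip 1, pmac 0) is transmitted on the vdev ID stored at vdev_list[1][0],
 * while a frame resolving to the local chip and local pmac short-circuits
 * to the TA peer's own vdev.
 */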
1557*5113495bSYour Name #else
1558*5113495bSYour Name #ifdef WLAN_MLO_MULTI_CHIP
1559*5113495bSYour Name static bool
1560*5113495bSYour Name dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1561*5113495bSYour Name 			      struct dp_txrx_peer *ta_peer,
1562*5113495bSYour Name 			      uint8_t *rx_tlv_hdr,
1563*5113495bSYour Name 			      struct hal_rx_msdu_metadata *msdu_metadata,
1564*5113495bSYour Name 			      struct dp_be_intrabss_params *params)
1565*5113495bSYour Name {
1566*5113495bSYour Name 	uint16_t da_peer_id;
1567*5113495bSYour Name 	struct dp_txrx_peer *da_peer;
1568*5113495bSYour Name 	bool ret = false;
1569*5113495bSYour Name 	uint8_t dest_chip_id;
1570*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1571*5113495bSYour Name 	struct dp_vdev_be *be_vdev =
1572*5113495bSYour Name 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
1573*5113495bSYour Name 	struct dp_soc_be *be_soc =
1574*5113495bSYour Name 		dp_get_be_soc_from_dp_soc(params->dest_soc);
1575*5113495bSYour Name 
1576*5113495bSYour Name 	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
1577*5113495bSYour Name 		return false;
1578*5113495bSYour Name 
1579*5113495bSYour Name 	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
1580*5113495bSYour Name 	if (dp_assert_always_internal_stat(
1581*5113495bSYour Name 				(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1)),
1582*5113495bSYour Name 				&be_soc->soc, rx.err.intra_bss_bad_chipid))
1583*5113495bSYour Name 		return false;
1584*5113495bSYour Name 
1585*5113495bSYour Name 	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
1586*5113495bSYour Name 
1587*5113495bSYour Name 	/* use dest chip id when TA is MLD peer and DA is legacy */
1588*5113495bSYour Name 	if (be_soc->mlo_enabled &&
1589*5113495bSYour Name 	    ta_peer->mld_peer &&
1590*5113495bSYour Name 	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1591*5113495bSYour Name 		/* validate chip_id, get a ref, and re-assign soc */
1592*5113495bSYour Name 		params->dest_soc =
1593*5113495bSYour Name 			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
1594*5113495bSYour Name 						      dest_chip_id);
1595*5113495bSYour Name 		if (!params->dest_soc)
1596*5113495bSYour Name 			return false;
1597*5113495bSYour Name 
1598*5113495bSYour Name 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1599*5113495bSYour Name 						     da_peer_id,
1600*5113495bSYour Name 						     &txrx_ref_handle,
1601*5113495bSYour Name 						     DP_MOD_ID_RX);
1602*5113495bSYour Name 		if (!da_peer)
1603*5113495bSYour Name 			return false;
1604*5113495bSYour Name 
1605*5113495bSYour Name 	} else {
1606*5113495bSYour Name 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1607*5113495bSYour Name 						     da_peer_id,
1608*5113495bSYour Name 						     &txrx_ref_handle,
1609*5113495bSYour Name 						     DP_MOD_ID_RX);
1610*5113495bSYour Name 		if (!da_peer)
1611*5113495bSYour Name 			return false;
1612*5113495bSYour Name 
1613*5113495bSYour Name 		params->dest_soc = da_peer->vdev->pdev->soc;
1614*5113495bSYour Name 		if (!params->dest_soc)
1615*5113495bSYour Name 			goto rel_da_peer;
1616*5113495bSYour Name 
1617*5113495bSYour Name 	}
1618*5113495bSYour Name 
1619*5113495bSYour Name 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1620*5113495bSYour Name 
1621*5113495bSYour Name 	/* If the source or destination peer is in the isolation
1622*5113495bSYour Name 	 * list, then don't forward; instead, push to the bridge stack.
1623*5113495bSYour Name 	 */
1624*5113495bSYour Name 	if (dp_get_peer_isolation(ta_peer) ||
1625*5113495bSYour Name 	    dp_get_peer_isolation(da_peer)) {
1626*5113495bSYour Name 		ret = false;
1627*5113495bSYour Name 		goto rel_da_peer;
1628*5113495bSYour Name 	}
1629*5113495bSYour Name 
1630*5113495bSYour Name 	if (da_peer->bss_peer || (da_peer == ta_peer)) {
1631*5113495bSYour Name 		ret = false;
1632*5113495bSYour Name 		goto rel_da_peer;
1633*5113495bSYour Name 	}
1634*5113495bSYour Name 
1635*5113495bSYour Name 	/* Same vdev, support Intra-BSS */
1636*5113495bSYour Name 	if (da_peer->vdev == ta_peer->vdev) {
1637*5113495bSYour Name 		ret = true;
1638*5113495bSYour Name 		goto rel_da_peer;
1639*5113495bSYour Name 	}
1640*5113495bSYour Name 
1641*5113495bSYour Name 	if (!be_vdev->mlo_dev_ctxt) {
1642*5113495bSYour Name 		ret = false;
1643*5113495bSYour Name 		goto rel_da_peer;
1644*5113495bSYour Name 	}
1645*5113495bSYour Name 
1646*5113495bSYour Name 	/* MLO specific Intra-BSS check */
1647*5113495bSYour Name 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1648*5113495bSYour Name 		/* use dest chip id for legacy dest peer */
1649*5113495bSYour Name 		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1650*5113495bSYour Name 			if (!(be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id][0]
1651*5113495bSYour Name 			      == params->tx_vdev_id) &&
1652*5113495bSYour Name 			    !(be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id][1]
1653*5113495bSYour Name 			      == params->tx_vdev_id)) {
1654*5113495bSYour Name 				/*dp_soc_unref_delete(soc);*/
1655*5113495bSYour Name 				goto rel_da_peer;
1656*5113495bSYour Name 			}
1657*5113495bSYour Name 		}
1658*5113495bSYour Name 		ret = true;
1659*5113495bSYour Name 	}
1660*5113495bSYour Name 
1661*5113495bSYour Name rel_da_peer:
1662*5113495bSYour Name 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1663*5113495bSYour Name 	return ret;
1664*5113495bSYour Name }
1665*5113495bSYour Name #else
1666*5113495bSYour Name static bool
1667*5113495bSYour Name dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1668*5113495bSYour Name 			      struct dp_txrx_peer *ta_peer,
1669*5113495bSYour Name 			      uint8_t *rx_tlv_hdr,
1670*5113495bSYour Name 			      struct hal_rx_msdu_metadata *msdu_metadata,
1671*5113495bSYour Name 			      struct dp_be_intrabss_params *params)
1672*5113495bSYour Name {
1673*5113495bSYour Name 	uint16_t da_peer_id;
1674*5113495bSYour Name 	struct dp_txrx_peer *da_peer;
1675*5113495bSYour Name 	bool ret = false;
1676*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1677*5113495bSYour Name 
1678*5113495bSYour Name 	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
1679*5113495bSYour Name 		return false;
1680*5113495bSYour Name 
1681*5113495bSYour Name 	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
1682*5113495bSYour Name 						params->dest_soc,
1683*5113495bSYour Name 						msdu_metadata->da_idx);
1684*5113495bSYour Name 
1685*5113495bSYour Name 	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
1686*5113495bSYour Name 					     &txrx_ref_handle, DP_MOD_ID_RX);
1687*5113495bSYour Name 	if (!da_peer)
1688*5113495bSYour Name 		return false;
1689*5113495bSYour Name 
1690*5113495bSYour Name 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1691*5113495bSYour Name 	/* If the source or destination peer is in the isolation
1692*5113495bSYour Name 	 * list, then don't forward; instead, push to the bridge stack.
1693*5113495bSYour Name 	 */
1694*5113495bSYour Name 	if (dp_get_peer_isolation(ta_peer) ||
1695*5113495bSYour Name 	    dp_get_peer_isolation(da_peer))
1696*5113495bSYour Name 		goto rel_da_peer;
1697*5113495bSYour Name 
1698*5113495bSYour Name 	if (da_peer->bss_peer || da_peer == ta_peer)
1699*5113495bSYour Name 		goto rel_da_peer;
1700*5113495bSYour Name 
1701*5113495bSYour Name 	/* Same vdev, support Intra-BSS */
1702*5113495bSYour Name 	if (da_peer->vdev == ta_peer->vdev) {
1703*5113495bSYour Name 		ret = true;
1704*5113495bSYour Name 		goto rel_da_peer;
1705*5113495bSYour Name 	}
1706*5113495bSYour Name 
1707*5113495bSYour Name 	/* MLO specific Intra-BSS check */
1708*5113495bSYour Name 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1709*5113495bSYour Name 		ret = true;
1710*5113495bSYour Name 		goto rel_da_peer;
1711*5113495bSYour Name 	}
1712*5113495bSYour Name 
1713*5113495bSYour Name rel_da_peer:
1714*5113495bSYour Name 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1715*5113495bSYour Name 	return ret;
1716*5113495bSYour Name }
1717*5113495bSYour Name #endif /* WLAN_MLO_MULTI_CHIP */
1718*5113495bSYour Name #endif /* INTRA_BSS_FWD_OFFLOAD */
1719*5113495bSYour Name 
1720*5113495bSYour Name #if defined(WLAN_PKT_CAPTURE_RX_2_0) || defined(CONFIG_WORD_BASED_TLV)
1721*5113495bSYour Name void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
1722*5113495bSYour Name 				  uint32_t *msg_word,
1723*5113495bSYour Name 				  void *rx_filter)
1724*5113495bSYour Name {
1725*5113495bSYour Name 	struct htt_rx_ring_tlv_filter *tlv_filter =
1726*5113495bSYour Name 				(struct htt_rx_ring_tlv_filter *)rx_filter;
1727*5113495bSYour Name 
1728*5113495bSYour Name 	if (!msg_word || !tlv_filter)
1729*5113495bSYour Name 		return;
1730*5113495bSYour Name 
1731*5113495bSYour Name 	/* tlv_filter->enable is set to 1 for monitor rings */
1732*5113495bSYour Name 	if (tlv_filter->enable)
1733*5113495bSYour Name 		return;
1734*5113495bSYour Name 
1735*5113495bSYour Name 	/* if word mask is zero, FW will set the default values */
1736*5113495bSYour Name 	if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
1737*5113495bSYour Name 	      tlv_filter->rx_msdu_end_wmask > 0)) {
1738*5113495bSYour Name 		return;
1739*5113495bSYour Name 	}
1740*5113495bSYour Name 
1741*5113495bSYour Name 	HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);
1742*5113495bSYour Name 
1743*5113495bSYour Name 	/* word 14 */
1744*5113495bSYour Name 	msg_word += 3;
1745*5113495bSYour Name 	*msg_word = 0;
1746*5113495bSYour Name 
1747*5113495bSYour Name 	HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
1748*5113495bSYour Name 				*msg_word,
1749*5113495bSYour Name 				tlv_filter->rx_mpdu_start_wmask);
1750*5113495bSYour Name 
1751*5113495bSYour Name 	/* word 15 */
1752*5113495bSYour Name 	msg_word++;
1753*5113495bSYour Name 	*msg_word = 0;
1754*5113495bSYour Name 	HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
1755*5113495bSYour Name 				*msg_word,
1756*5113495bSYour Name 				tlv_filter->rx_msdu_end_wmask);
1757*5113495bSYour Name }
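/*
 * A hedged usage sketch (the mask values are illustrative; bit N of a wmask
 * is assumed to select word N of the corresponding TLV, and msg_word is
 * assumed to already point at the right offset of the HTT ring selection
 * message):
 *
 *	struct htt_rx_ring_tlv_filter f = {0};
 *
 *	f.rx_mpdu_start_wmask = 0x3;	// words 0-1 of rx_mpdu_start
 *	f.rx_msdu_end_wmask = 0x7;	// words 0-2 of rx_msdu_end
 *	dp_rx_word_mask_subscribe_be(soc, msg_word, &f);
 */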
1758*5113495bSYour Name #else
1759*5113495bSYour Name void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
1760*5113495bSYour Name 				  uint32_t *msg_word,
1761*5113495bSYour Name 				  void *rx_filter)
1762*5113495bSYour Name {
1763*5113495bSYour Name }
1764*5113495bSYour Name #endif
1765*5113495bSYour Name 
1766*5113495bSYour Name #if defined(WLAN_MCAST_MLO) && defined(CONFIG_MLO_SINGLE_DEV)
1767*5113495bSYour Name static inline
1768*5113495bSYour Name bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
1769*5113495bSYour Name 				 qdf_nbuf_t nbuf_copy)
1770*5113495bSYour Name {
1771*5113495bSYour Name 	struct dp_vdev *mcast_primary_vdev = NULL;
1772*5113495bSYour Name 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1773*5113495bSYour Name 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1774*5113495bSYour Name 	struct cdp_tx_exception_metadata tx_exc_metadata = {0};
1775*5113495bSYour Name 
1776*5113495bSYour Name 	tx_exc_metadata.is_mlo_mcast = 1;
1777*5113495bSYour Name 	tx_exc_metadata.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
1778*5113495bSYour Name 	tx_exc_metadata.sec_type = CDP_INVALID_SEC_TYPE;
1779*5113495bSYour Name 	tx_exc_metadata.peer_id = CDP_INVALID_PEER;
1780*5113495bSYour Name 	tx_exc_metadata.tid = CDP_INVALID_TID;
1781*5113495bSYour Name 
1782*5113495bSYour Name 	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc,
1783*5113495bSYour Name 							   be_vdev,
1784*5113495bSYour Name 							   DP_MOD_ID_RX);
1785*5113495bSYour Name 
1786*5113495bSYour Name 	if (!mcast_primary_vdev)
1787*5113495bSYour Name 		return false;
1788*5113495bSYour Name 
1789*5113495bSYour Name 	nbuf_copy = dp_tx_send_exception((struct cdp_soc_t *)
1790*5113495bSYour Name 					 mcast_primary_vdev->pdev->soc,
1791*5113495bSYour Name 					 mcast_primary_vdev->vdev_id,
1792*5113495bSYour Name 					 nbuf_copy, &tx_exc_metadata);
1793*5113495bSYour Name 
1794*5113495bSYour Name 	if (nbuf_copy)
1795*5113495bSYour Name 		qdf_nbuf_free(nbuf_copy);
1796*5113495bSYour Name 
1797*5113495bSYour Name 	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
1798*5113495bSYour Name 			     mcast_primary_vdev, DP_MOD_ID_RX);
1799*5113495bSYour Name 	return true;
1800*5113495bSYour Name }
1801*5113495bSYour Name #else
1802*5113495bSYour Name static inline
1803*5113495bSYour Name bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
1804*5113495bSYour Name 				 qdf_nbuf_t nbuf_copy)
1805*5113495bSYour Name {
1806*5113495bSYour Name 	return false;
1807*5113495bSYour Name }
1808*5113495bSYour Name #endif
1809*5113495bSYour Name 
1810*5113495bSYour Name bool
1811*5113495bSYour Name dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
1812*5113495bSYour Name 				struct dp_txrx_peer *ta_txrx_peer,
1813*5113495bSYour Name 				qdf_nbuf_t nbuf_copy,
1814*5113495bSYour Name 				struct cdp_tid_rx_stats *tid_stats,
1815*5113495bSYour Name 				uint8_t link_id)
1816*5113495bSYour Name {
1817*5113495bSYour Name 	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
1818*5113495bSYour Name 		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
1819*5113495bSYour Name 		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);
1820*5113495bSYour Name 
1821*5113495bSYour Name 		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
1822*5113495bSYour Name 		tx_exc_metadata.is_intrabss_fwd = 1;
1823*5113495bSYour Name 		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
1824*5113495bSYour Name 
1825*5113495bSYour Name 		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
1826*5113495bSYour Name 					  ta_txrx_peer->vdev->vdev_id,
1827*5113495bSYour Name 					  nbuf_copy,
1828*5113495bSYour Name 					  &tx_exc_metadata)) {
1829*5113495bSYour Name 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1830*5113495bSYour Name 						      rx.intra_bss.fail, 1,
1831*5113495bSYour Name 						      len, link_id);
1832*5113495bSYour Name 			tid_stats->fail_cnt[INTRABSS_DROP]++;
1833*5113495bSYour Name 			qdf_nbuf_free(nbuf_copy);
1834*5113495bSYour Name 		} else {
1835*5113495bSYour Name 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1836*5113495bSYour Name 						      rx.intra_bss.pkts, 1,
1837*5113495bSYour Name 						      len, link_id);
1838*5113495bSYour Name 			tid_stats->intrabss_cnt++;
1839*5113495bSYour Name 		}
1840*5113495bSYour Name 		return true;
1841*5113495bSYour Name 	}
1842*5113495bSYour Name 
1843*5113495bSYour Name 	if (dp_rx_intrabss_mlo_mcbc_fwd(soc, ta_txrx_peer->vdev,
1844*5113495bSYour Name 					nbuf_copy))
1845*5113495bSYour Name 		return true;
1846*5113495bSYour Name 
1847*5113495bSYour Name 	return false;
1848*5113495bSYour Name }
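/*
 * A hedged caller sketch (illustrative, not the exact call site): the mcast
 * forwarding path hands this handler a clone and keeps the original for
 * stack delivery; the clone must be freed if the handler does not consume
 * it:
 *
 *	qdf_nbuf_t nbuf_copy = qdf_nbuf_copy(nbuf);
 *
 *	if (nbuf_copy &&
 *	    !dp_rx_intrabss_mcast_handler_be(soc, ta_peer, nbuf_copy,
 *					     tid_stats, link_id))
 *		qdf_nbuf_free(nbuf_copy);
 */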
1849*5113495bSYour Name 
1850*5113495bSYour Name bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
1851*5113495bSYour Name 			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1852*5113495bSYour Name 			   uint8_t link_id)
1853*5113495bSYour Name {
1854*5113495bSYour Name 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1855*5113495bSYour Name 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1856*5113495bSYour Name 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
1857*5113495bSYour Name 					tid_stats.tid_rx_stats[ring_id][tid];
1858*5113495bSYour Name 	bool ret = false;
1859*5113495bSYour Name 	struct dp_be_intrabss_params params;
1860*5113495bSYour Name 	struct hal_rx_msdu_metadata msdu_metadata;
1861*5113495bSYour Name 
1862*5113495bSYour Name 	/* If it is a broadcast pkt (e.g. ARP) and it is not its own
1863*5113495bSYour Name 	 * source, then clone the pkt, send the cloned pkt for intra-BSS
1864*5113495bSYour Name 	 * forwarding and send the original pkt up the network stack.
1865*5113495bSYour Name 	 * Note: how do we handle multicast pkts? Do we forward all
1866*5113495bSYour Name 	 * multicast pkts as-is, or let a higher layer module like
1867*5113495bSYour Name 	 * igmpsnoop decide whether to forward or not with mcast
1868*5113495bSYour Name 	 * enhancement?
1869*5113495bSYour Name 	 */
1870*5113495bSYour Name 	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
1871*5113495bSYour Name 		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
1872*5113495bSYour Name 					       nbuf, tid_stats, link_id);
1873*5113495bSYour Name 	}
1874*5113495bSYour Name 
1875*5113495bSYour Name 	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
1876*5113495bSYour Name 					    nbuf))
1877*5113495bSYour Name 		return true;
1878*5113495bSYour Name 
1879*5113495bSYour Name 	hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
1880*5113495bSYour Name 	params.dest_soc = soc;
1881*5113495bSYour Name 	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer, rx_tlv_hdr,
1882*5113495bSYour Name 					  &msdu_metadata, &params)) {
1883*5113495bSYour Name 		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
1884*5113495bSYour Name 					       params.tx_vdev_id,
1885*5113495bSYour Name 					       rx_tlv_hdr, nbuf, tid_stats,
1886*5113495bSYour Name 					       link_id);
1887*5113495bSYour Name 	}
1888*5113495bSYour Name 
1889*5113495bSYour Name 	return ret;
1890*5113495bSYour Name }
1891*5113495bSYour Name #endif
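/*
 * Summary of dp_rx_intrabss_fwd_be() above: mcast/bcast frames from a
 * non-BSS peer go through dp_rx_intrabss_mcbc_fwd(); frames caught by the
 * EAPOL drop check are treated as handled without forwarding; the remaining
 * unicast frames are forwarded only when dp_rx_intrabss_ucast_check_be()
 * resolves a valid (dest_soc, tx_vdev_id) pair into @params.
 */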
1892*5113495bSYour Name 
1893*5113495bSYour Name #ifndef BE_WBM_RELEASE_DESC_RX_SG_SUPPORT
1894*5113495bSYour Name /**
1895*5113495bSYour Name  * dp_rx_chain_msdus_be() - Function to chain all msdus of an mpdu
1896*5113495bSYour Name  *			    to the pdev invalid peer list
1897*5113495bSYour Name  *
1898*5113495bSYour Name  * @soc: core DP main context
1899*5113495bSYour Name  * @nbuf: Buffer pointer
1900*5113495bSYour Name  * @rx_tlv_hdr: start of rx tlv header
1901*5113495bSYour Name  * @mac_id: mac id
1902*5113495bSYour Name  *
1903*5113495bSYour Name  *  Return: bool: true for last msdu of mpdu
1904*5113495bSYour Name  * Return: bool: true for the last msdu of an mpdu
1905*5113495bSYour Name static bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
1906*5113495bSYour Name 				 uint8_t *rx_tlv_hdr, uint8_t mac_id)
1907*5113495bSYour Name {
1908*5113495bSYour Name 	bool mpdu_done = false;
1909*5113495bSYour Name 	qdf_nbuf_t curr_nbuf = NULL;
1910*5113495bSYour Name 	qdf_nbuf_t tmp_nbuf = NULL;
1911*5113495bSYour Name 
1912*5113495bSYour Name 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1913*5113495bSYour Name 
1914*5113495bSYour Name 	if (!dp_pdev) {
1915*5113495bSYour Name 		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
1916*5113495bSYour Name 		return mpdu_done;
1917*5113495bSYour Name 	}
1918*5113495bSYour Name 	/* If the invalid peer SG list has reached its max size, free
1919*5113495bSYour Name 	 * the buffers in the list and treat the current buffer as the
1920*5113495bSYour Name 	 * start of a new list. The current logic to detect the last
1921*5113495bSYour Name 	 * buffer from attn_tlv is not reliable in the OFDMA UL
1922*5113495bSYour Name 	 * scenario, hence the max-buffers check to avoid the list
1923*5113495bSYour Name 	 * piling up.
1924*5113495bSYour Name 	 */
1925*5113495bSYour Name 	if (!dp_pdev->first_nbuf ||
1926*5113495bSYour Name 	    (dp_pdev->invalid_peer_head_msdu &&
1927*5113495bSYour Name 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
1928*5113495bSYour Name 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
1929*5113495bSYour Name 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1930*5113495bSYour Name 		dp_pdev->first_nbuf = true;
1931*5113495bSYour Name 
1932*5113495bSYour Name 		/* If the new nbuf received is the first msdu of the
1933*5113495bSYour Name 		 * amsdu and there are msdus in the invalid peer msdu
1934*5113495bSYour Name 		 * list, then let us free all the msdus of the invalid
1935*5113495bSYour Name 		 * peer msdu list.
1936*5113495bSYour Name 		 * This scenario can happen when we start receiving a
1937*5113495bSYour Name 		 * new a-msdu even before the previous a-msdu is
1938*5113495bSYour Name 		 * completely received.
1939*5113495bSYour Name 		 */
1940*5113495bSYour Name 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
1941*5113495bSYour Name 		while (curr_nbuf) {
1942*5113495bSYour Name 			tmp_nbuf = curr_nbuf->next;
1943*5113495bSYour Name 			dp_rx_nbuf_free(curr_nbuf);
1944*5113495bSYour Name 			curr_nbuf = tmp_nbuf;
1945*5113495bSYour Name 		}
1946*5113495bSYour Name 
1947*5113495bSYour Name 		dp_pdev->invalid_peer_head_msdu = NULL;
1948*5113495bSYour Name 		dp_pdev->invalid_peer_tail_msdu = NULL;
1949*5113495bSYour Name 
1950*5113495bSYour Name 		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
1951*5113495bSYour Name 	}
1952*5113495bSYour Name 
1953*5113495bSYour Name 	if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
1954*5113495bSYour Name 	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1955*5113495bSYour Name 		qdf_assert_always(dp_pdev->first_nbuf);
1956*5113495bSYour Name 		dp_pdev->first_nbuf = false;
1957*5113495bSYour Name 		mpdu_done = true;
1958*5113495bSYour Name 	}
1959*5113495bSYour Name 
1960*5113495bSYour Name 	/*
1961*5113495bSYour Name 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
1962*5113495bSYour Name 	 * should be NULL here; keep this check for debugging purposes
1963*5113495bSYour Name 	 * in case of some corner case.
1964*5113495bSYour Name 	 */
1965*5113495bSYour Name 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
1966*5113495bSYour Name 					dp_pdev->invalid_peer_tail_msdu);
1967*5113495bSYour Name 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
1968*5113495bSYour Name 			  dp_pdev->invalid_peer_tail_msdu,
1969*5113495bSYour Name 			  nbuf);
1970*5113495bSYour Name 
1971*5113495bSYour Name 	return mpdu_done;
1972*5113495bSYour Name }
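/*
 * Illustrative view of the list handling above (assumed semantics, not the
 * exact macro bodies): DP_RX_LIST_APPEND() links the nbuf at the tail of the
 * (head, tail) chain through the nbuf next pointer, so after chaining three
 * MSDUs of one mpdu:
 *
 *	invalid_peer_head_msdu -> msdu0 -> msdu1 -> msdu2
 *	invalid_peer_tail_msdu ---------------------^
 */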
1973*5113495bSYour Name #else
1974*5113495bSYour Name static bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
1975*5113495bSYour Name 				 uint8_t *rx_tlv_hdr, uint8_t mac_id)
1976*5113495bSYour Name {
1977*5113495bSYour Name 	return false;
1978*5113495bSYour Name }
1979*5113495bSYour Name #endif
1980*5113495bSYour Name 
1981*5113495bSYour Name qdf_nbuf_t
1982*5113495bSYour Name dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
1983*5113495bSYour Name 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
1984*5113495bSYour Name 			   uint32_t *rx_bufs_used)
1985*5113495bSYour Name {
1986*5113495bSYour Name 	hal_ring_desc_t ring_desc;
1987*5113495bSYour Name 	hal_soc_handle_t hal_soc;
1988*5113495bSYour Name 	struct dp_rx_desc *rx_desc;
1989*5113495bSYour Name 	union dp_rx_desc_list_elem_t
1990*5113495bSYour Name 		*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
1991*5113495bSYour Name 	union dp_rx_desc_list_elem_t
1992*5113495bSYour Name 		*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
1993*5113495bSYour Name 	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
1994*5113495bSYour Name 	uint8_t mac_id;
1995*5113495bSYour Name 	struct dp_srng *dp_rxdma_srng;
1996*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
1997*5113495bSYour Name 	qdf_nbuf_t nbuf_head = NULL;
1998*5113495bSYour Name 	qdf_nbuf_t nbuf_tail = NULL;
1999*5113495bSYour Name 	qdf_nbuf_t nbuf;
2000*5113495bSYour Name 	uint8_t msdu_continuation = 0;
2001*5113495bSYour Name 	bool process_sg_buf = false;
2002*5113495bSYour Name 	QDF_STATUS status;
2003*5113495bSYour Name 	struct dp_soc *replenish_soc;
2004*5113495bSYour Name 	uint8_t chip_id;
2005*5113495bSYour Name 	union hal_wbm_err_info_u wbm_err = { 0 };
2006*5113495bSYour Name 
2007*5113495bSYour Name 	qdf_assert(soc && hal_ring_hdl);
2008*5113495bSYour Name 	hal_soc = soc->hal_soc;
2009*5113495bSYour Name 	qdf_assert(hal_soc);
2010*5113495bSYour Name 
2011*5113495bSYour Name 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2012*5113495bSYour Name 		/* TODO */
2013*5113495bSYour Name 		/*
2014*5113495bSYour Name 		 * Need API to convert from hal_ring pointer to
2015*5113495bSYour Name 		 * Ring Type / Ring Id combo
2016*5113495bSYour Name 		 */
2017*5113495bSYour Name 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
2018*5113495bSYour Name 			      soc, hal_ring_hdl);
2019*5113495bSYour Name 		goto done;
2020*5113495bSYour Name 	}
2021*5113495bSYour Name 
2022*5113495bSYour Name 	while (qdf_likely(quota)) {
2023*5113495bSYour Name 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2024*5113495bSYour Name 
2025*5113495bSYour Name 		if (qdf_unlikely(!ring_desc))
2026*5113495bSYour Name 			break;
2027*5113495bSYour Name 
2028*5113495bSYour Name 		/* Get SW Desc from HAL desc */
2029*5113495bSYour Name 		if (dp_wbm_get_rx_desc_from_hal_desc_be(soc,
2030*5113495bSYour Name 							ring_desc,
2031*5113495bSYour Name 							&rx_desc)) {
2032*5113495bSYour Name 			dp_rx_err_err("get rx sw desc from hal_desc failed");
2033*5113495bSYour Name 			continue;
2034*5113495bSYour Name 		}
2035*5113495bSYour Name 
2036*5113495bSYour Name 		if (dp_assert_always_internal_stat(rx_desc, soc,
2037*5113495bSYour Name 						   rx.err.rx_desc_null))
2038*5113495bSYour Name 			continue;
2039*5113495bSYour Name 
2040*5113495bSYour Name 		if (!dp_rx_desc_check_magic(rx_desc)) {
2041*5113495bSYour Name 			dp_rx_err_err("%pK: Invalid rx_desc %pK",
2042*5113495bSYour Name 				      soc, rx_desc);
2043*5113495bSYour Name 			continue;
2044*5113495bSYour Name 		}
2045*5113495bSYour Name 
2046*5113495bSYour Name 		/*
2047*5113495bSYour Name 		 * This is an unlikely scenario where the host is reaping
2048*5113495bSYour Name 		 * a descriptor which it already reaped just a while ago
2049*5113495bSYour Name 		 * but has yet to replenish back to HW.
2050*5113495bSYour Name 		 * In this case the host will dump the last 128 descriptors,
2051*5113495bSYour Name 		 * including the software descriptor rx_desc, and assert.
2052*5113495bSYour Name 		 */
2053*5113495bSYour Name 		if (qdf_unlikely(!rx_desc->in_use)) {
2054*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
2055*5113495bSYour Name 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2056*5113495bSYour Name 						   ring_desc, rx_desc);
2057*5113495bSYour Name 			continue;
2058*5113495bSYour Name 		}
2059*5113495bSYour Name 
2060*5113495bSYour Name 		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
2061*5113495bSYour Name 							  ring_desc, rx_desc);
2062*5113495bSYour Name 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2063*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2064*5113495bSYour Name 			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
2065*5113495bSYour Name 				   rx_desc->nbuf);
2066*5113495bSYour Name 			rx_desc->in_err_state = 1;
2067*5113495bSYour Name 			continue;
2068*5113495bSYour Name 		}
2069*5113495bSYour Name 
2070*5113495bSYour Name 		nbuf = rx_desc->nbuf;
2071*5113495bSYour Name 
2072*5113495bSYour Name 		/*
2073*5113495bSYour Name 		 * Read the WBM err info, MSDU info, MPDU info and peer
2074*5113495bSYour Name 		 * meta data from the desc and save it all in the nbuf
2075*5113495bSYour Name 		 * CB/TLV; we will need it for the actual nbuf processing.
2076*5113495bSYour Name 		 */
2077*5113495bSYour Name 		wbm_err.info = dp_rx_wbm_err_copy_desc_info_in_nbuf(
2078*5113495bSYour Name 							soc,
2079*5113495bSYour Name 							ring_desc,
2080*5113495bSYour Name 							nbuf,
2081*5113495bSYour Name 							rx_desc->pool_id);
2082*5113495bSYour Name 		/*
2083*5113495bSYour Name 		 * For WBM ring, expect only MSDU buffers
2084*5113495bSYour Name 		 */
2085*5113495bSYour Name 		if (dp_assert_always_internal_stat(
2086*5113495bSYour Name 				wbm_err.info_bit.buffer_or_desc_type ==
2087*5113495bSYour Name 						HAL_RX_WBM_BUF_TYPE_REL_BUF,
2088*5113495bSYour Name 				soc, rx.err.wbm_err_buf_rel_type))
2089*5113495bSYour Name 			continue;
2090*5113495bSYour Name 		/*
2091*5113495bSYour Name 		 * Errors are handled only if the source is RXDMA or REO
2092*5113495bSYour Name 		 */
2093*5113495bSYour Name 		qdf_assert((wbm_err.info_bit.wbm_err_src ==
2094*5113495bSYour Name 			    HAL_RX_WBM_ERR_SRC_RXDMA) ||
2095*5113495bSYour Name 			   (wbm_err.info_bit.wbm_err_src ==
2096*5113495bSYour Name 			    HAL_RX_WBM_ERR_SRC_REO));
2097*5113495bSYour Name 
2098*5113495bSYour Name 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2099*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
2100*5113495bSYour Name 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
2101*5113495bSYour Name 		rx_desc->unmapped = 1;
2102*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2103*5113495bSYour Name 
2104*5113495bSYour Name 		if (qdf_unlikely(
2105*5113495bSYour Name 			soc->wbm_release_desc_rx_sg_support &&
2106*5113495bSYour Name 			dp_rx_is_sg_formation_required(&wbm_err.info_bit))) {
2107*5113495bSYour Name 			/* SG is detected from continuation bit */
2108*5113495bSYour Name 			msdu_continuation =
2109*5113495bSYour Name 				dp_rx_wbm_err_msdu_continuation_get(soc,
2110*5113495bSYour Name 								    ring_desc,
2111*5113495bSYour Name 								    nbuf);
2112*5113495bSYour Name 			if (msdu_continuation &&
2113*5113495bSYour Name 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
2114*5113495bSYour Name 				/* Update length from first buffer in SG */
2115*5113495bSYour Name 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
2116*5113495bSYour Name 					hal_rx_msdu_start_msdu_len_get(
2117*5113495bSYour Name 						soc->hal_soc,
2118*5113495bSYour Name 						qdf_nbuf_data(nbuf));
2119*5113495bSYour Name 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
2120*5113495bSYour Name 									true;
2121*5113495bSYour Name 			}
2122*5113495bSYour Name 
2123*5113495bSYour Name 			if (msdu_continuation) {
2124*5113495bSYour Name 				/* MSDU continued packets */
2125*5113495bSYour Name 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
2126*5113495bSYour Name 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2127*5113495bSYour Name 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2128*5113495bSYour Name 			} else {
2129*5113495bSYour Name 				/* This is the terminal packet in SG */
2130*5113495bSYour Name 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
2131*5113495bSYour Name 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
2132*5113495bSYour Name 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2133*5113495bSYour Name 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2134*5113495bSYour Name 				process_sg_buf = true;
2135*5113495bSYour Name 			}
2136*5113495bSYour Name 		} else {
2137*5113495bSYour Name 			qdf_nbuf_set_rx_chfrag_cont(nbuf, 0);
2138*5113495bSYour Name 		}
2139*5113495bSYour Name 
2140*5113495bSYour Name 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
2141*5113495bSYour Name 
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
					  nbuf);
			if (process_sg_buf) {
				if (!dp_rx_buffer_pool_refill(
					soc,
					soc->wbm_sg_param.wbm_sg_nbuf_head,
					rx_desc->pool_id))
					DP_RX_MERGE_TWO_LIST(
					  nbuf_head, nbuf_tail,
					  soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail);
				dp_rx_wbm_sg_list_last_msdu_war(soc);
				dp_rx_wbm_sg_list_reset(soc);
				process_sg_buf = false;
			}
		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
						     rx_desc->pool_id)) {
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
		}

		dp_rx_add_to_free_desc_list
			(&head[rx_desc->chip_id][rx_desc->pool_id],
			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);

		/*
		 * If the continuation bit is set, the MSDU is spread across
		 * multiple buffers; do not decrement the quota until all
		 * buffers of that MSDU have been reaped.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

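	/*
	 * Return the reaped buffers to their owning SoC: on MLO targets a
	 * partner chip's descriptors must replenish that chip's refill
	 * ring, so resolve the SoC from the chip id before replenishing
	 * each pool.
	 */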
	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
			/*
			 * Continue with the next mac_id if no packets were
			 * reaped from this pool.
			 */
			if (!rx_bufs_reaped[chip_id][mac_id])
				continue;

			replenish_soc = dp_rx_replenish_soc_get(soc, chip_id);

			dp_rxdma_srng =
				&replenish_soc->rx_refill_buf_ring[mac_id];

			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
						dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[chip_id][mac_id],
						&head[chip_id][mac_id],
						&tail[chip_id][mac_id]);
			*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
		}
	}
	return nbuf_head;
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * check_extap_multicast_loopback() - Check if rx packet is a loopback packet.
 *
 * @vdev: vdev on which rx packet is received
 * @addr: src address of the received packet
 *
 * Return: true if the source address matches the vdev or MLD MAC address
 *	   (i.e. the packet is a looped-back packet), false otherwise
 */
static bool check_extap_multicast_loopback(struct dp_vdev *vdev, uint8_t *addr)
{
	/* if src mac addr matches with vdev mac address then drop the pkt */
	if (!(qdf_mem_cmp(addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)))
		return true;

	/* if src mac addr matches with mld mac address then drop the pkt */
	if (!(qdf_mem_cmp(addr, vdev->mld_mac_addr.raw, QDF_MAC_ADDR_SIZE)))
		return true;

	return false;
}
#else
static bool check_extap_multicast_loopback(struct dp_vdev *vdev, uint8_t *addr)
{
	return false;
}
#endif

QDF_STATUS
dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
			    struct dp_txrx_peer *txrx_peer,
			    bool is_reo_exception,
			    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;
	bool is_eapol = false;
	bool enh_flag;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	qdf_nbuf_set_rx_chfrag_start(
				nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

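	/*
	 * pkt_len spans the RX TLV header, the L3 header pad and the MSDU.
	 * For non-fragmented (non-SG) frames, validate it and clamp the
	 * nbuf length to the RX buffer size, since a single buffer cannot
	 * hold more than that.
	 */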
	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf, qdf_min(pkt_len, (uint32_t)buf_size));
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written.
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

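	/*
	 * No peer context for this frame: account it as an invalid-peer
	 * drop. Depending on SG support, either hand this nbuf directly to
	 * the invalid-peer handler or chain MSDUs per pool until the full
	 * MPDU has been reaped.
	 */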
	if (!txrx_peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* WBM SG support is enabled on QCN9000-class targets */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf,
							   mpdu_done,
							   pool_id);
		} else {
			mpdu_done = dp_rx_chain_msdus_be(soc, nbuf, rx_tlv_hdr,
							 pool_id);

			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(
					soc,
					pdev->invalid_peer_head_msdu,
					mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by the total size of the
	 * pre-header TLVs.
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
				   soc->rx_pkt_tlv_size));

	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1,
					  link_id);
		goto drop_nbuf;
	}

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		/* sa_idx is unsigned; only the upper bound needs a check */
		if (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if ((!soc->mec_fw_offload) &&
	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf), link_id);
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * MAC addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf), link_id);
		goto drop_nbuf;
	}

	if (qdf_unlikely(txrx_peer->nawds_enabled &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1,
					  link_id);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1,
					  link_id);
		goto drop_nbuf;
	}
	/*
	 * WDS source port learning: with host-based AST handling, learn the
	 * source peer from ethernet-decapped frames on WDS-enabled vdevs.
	 */
	if (!soc->ast_offload_support &&
	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
					msdu_metadata);

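	/*
	 * For unicast exception frames the REO queue (tidQ) for this TID
	 * may not have been set up yet; set it up here so that subsequent
	 * frames on this TID are reordered and routed normally.
	 */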
	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		struct dp_peer *peer;
		struct dp_rx_tid *rx_tid;

		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
					     DP_MOD_ID_RX_ERR);
		if (peer) {
			rx_tid = &peer->rx_tid[tid];
			qdf_spin_lock_bh(&rx_tid->tid_lock);
			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
				/*
				 * For a mesh peer: if on one mesh AP the
				 * mesh peer is not deleted, the new addition
				 * of that peer on another mesh AP does not
				 * trigger BA negotiation, leading to a
				 * mismatch in BA windows. To avoid this,
				 * send the max BA window during init.
				 * IEEE80211_SEQ_MAX indicates an invalid
				 * start_seq.
				 */
				if (qdf_unlikely(vdev->mesh_vdev) ||
				    qdf_unlikely(txrx_peer->nawds_enabled))
					dp_rx_tid_setup_wifi3(
						peer, BIT(tid),
						hal_get_rx_max_ba_window(soc->hal_soc, tid),
						IEEE80211_SEQ_MAX);
				else
					dp_rx_tid_setup_wifi3(peer, BIT(tid), 1,
							      IEEE80211_SEQ_MAX);
			}
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		}
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

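	/*
	 * Until the peer is authorized, only EAPOL/WAPI handshake frames
	 * addressed to this vdev are allowed through; everything else is
	 * dropped.
	 */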
	if (!txrx_peer->authorize) {
		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);

		if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
			if (!dp_rx_err_match_dhost(eh, vdev))
				goto drop_nbuf;
		} else {
			goto drop_nbuf;
		}
	}

	/*
	 * Drop packets in this path if cce_match is found. Packets arrive
	 * here in one of two ways, depending on whether the tidQ is set up:
	 * 1. tidQ is set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
	 *    cce_match = 1.
	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
	 *    dropped.
	 * 2. tidQ is not set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
	 *    cce_match = 1.
	 *    These packets need to be dropped and must not be delivered
	 *    to the stack.
	 */
	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
		goto drop_nbuf;

	/*
	 * In extap mode, drop the received packet if it matches the MLD MAC
	 * address. For non-IP packets, address conversion may not be
	 * possible, so the MEC entry is not updated, which results in
	 * loopback.
	 */
	if (qdf_unlikely(check_extap_multicast_loopback(vdev,
							eh->ether_shost))) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
					      qdf_nbuf_len(nbuf), link_id);
		goto drop_nbuf;
	}

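	/*
	 * Deliver based on the vdev decap type: raw-mode frames go out via
	 * the raw-delivery path; ethernet-decapped frames are tagged,
	 * counted and handed to the OS interface as exception frames.
	 */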
	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_raw_frame(nbuf, 1);
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
	} else {
		enh_flag = vdev->pdev->enhanced_stats_en;
		qdf_nbuf_set_next(nbuf, NULL);
		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					  enh_flag);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
					      rx.rx_success, 1,
					      qdf_nbuf_len(nbuf),
					      link_id);
		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
				 soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					    enh_flag, link_id);

			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
						    qdf_nbuf_len(nbuf),
						    enh_flag,
						    link_id);
		} else {
			DP_PEER_UC_INCC_PKT(txrx_peer, 1,
					    qdf_nbuf_len(nbuf),
					    enh_flag,
					    link_id);
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);

		if (qdf_unlikely(vdev->multipass_en)) {
			if (!dp_rx_multipass_process(txrx_peer, nbuf, tid)) {
				DP_PEER_PER_PKT_STATS_INC
					(txrx_peer,
					 rx.multipass_rx_pkt_drop,
					 1, link_id);
				goto drop_nbuf;
			}
		}

		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    is_eapol);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	dp_rx_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}