xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_err.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #include "hal_hw_headers.h"
21*5113495bSYour Name #include "dp_types.h"
22*5113495bSYour Name #include "dp_rx.h"
23*5113495bSYour Name #include "dp_tx.h"
24*5113495bSYour Name #include "dp_peer.h"
25*5113495bSYour Name #include "dp_internal.h"
26*5113495bSYour Name #include "hal_api.h"
27*5113495bSYour Name #include "qdf_trace.h"
28*5113495bSYour Name #include "qdf_nbuf.h"
29*5113495bSYour Name #include "dp_rx_defrag.h"
30*5113495bSYour Name #include "dp_ipa.h"
31*5113495bSYour Name #include "dp_internal.h"
32*5113495bSYour Name #ifdef WIFI_MONITOR_SUPPORT
33*5113495bSYour Name #include "dp_htt.h"
34*5113495bSYour Name #include <dp_mon.h>
35*5113495bSYour Name #endif
36*5113495bSYour Name #ifdef FEATURE_WDS
37*5113495bSYour Name #include "dp_txrx_wds.h"
38*5113495bSYour Name #endif
39*5113495bSYour Name #include <enet.h>	/* LLC_SNAP_HDR_LEN */
40*5113495bSYour Name #include "qdf_net_types.h"
41*5113495bSYour Name #include "dp_rx_buffer_pool.h"
42*5113495bSYour Name 
43*5113495bSYour Name #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
44*5113495bSYour Name #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
45*5113495bSYour Name #define dp_rx_err_info(params...) \
46*5113495bSYour Name 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
47*5113495bSYour Name #define dp_rx_err_info_rl(params...) \
48*5113495bSYour Name 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
49*5113495bSYour Name #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
50*5113495bSYour Name 
51*5113495bSYour Name #ifndef QCA_HOST_MODE_WIFI_DISABLED
52*5113495bSYour Name 
53*5113495bSYour Name 
54*5113495bSYour Name /* Max regular Rx packet routing error */
55*5113495bSYour Name #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
56*5113495bSYour Name #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
57*5113495bSYour Name #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
58*5113495bSYour Name 
59*5113495bSYour Name #ifdef FEATURE_MEC
dp_rx_mcast_echo_check(struct dp_soc * soc,struct dp_txrx_peer * txrx_peer,uint8_t * rx_tlv_hdr,qdf_nbuf_t nbuf)60*5113495bSYour Name bool dp_rx_mcast_echo_check(struct dp_soc *soc,
61*5113495bSYour Name 			    struct dp_txrx_peer *txrx_peer,
62*5113495bSYour Name 			    uint8_t *rx_tlv_hdr,
63*5113495bSYour Name 			    qdf_nbuf_t nbuf)
64*5113495bSYour Name {
65*5113495bSYour Name 	struct dp_vdev *vdev = txrx_peer->vdev;
66*5113495bSYour Name 	struct dp_pdev *pdev = vdev->pdev;
67*5113495bSYour Name 	struct dp_mec_entry *mecentry = NULL;
68*5113495bSYour Name 	struct dp_ast_entry *ase = NULL;
69*5113495bSYour Name 	uint16_t sa_idx = 0;
70*5113495bSYour Name 	uint8_t *data;
71*5113495bSYour Name 	/*
72*5113495bSYour Name 	 * Multicast Echo Check is required only if vdev is STA and
73*5113495bSYour Name 	 * received pkt is a multicast/broadcast pkt. otherwise
74*5113495bSYour Name 	 * skip the MEC check.
75*5113495bSYour Name 	 */
76*5113495bSYour Name 	if (vdev->opmode != wlan_op_mode_sta)
77*5113495bSYour Name 		return false;
78*5113495bSYour Name 	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
79*5113495bSYour Name 		return false;
80*5113495bSYour Name 
81*5113495bSYour Name 	data = qdf_nbuf_data(nbuf);
82*5113495bSYour Name 
83*5113495bSYour Name 	/*
84*5113495bSYour Name 	 * if the received pkts src mac addr matches with vdev
85*5113495bSYour Name 	 * mac address then drop the pkt as it is looped back
86*5113495bSYour Name 	 */
87*5113495bSYour Name 	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
88*5113495bSYour Name 			  vdev->mac_addr.raw,
89*5113495bSYour Name 			  QDF_MAC_ADDR_SIZE)))
90*5113495bSYour Name 		return true;
91*5113495bSYour Name 
92*5113495bSYour Name 	/*
93*5113495bSYour Name 	 * In case of qwrap isolation mode, donot drop loopback packets.
94*5113495bSYour Name 	 * In isolation mode, all packets from the wired stations need to go
95*5113495bSYour Name 	 * to rootap and loop back to reach the wireless stations and
96*5113495bSYour Name 	 * vice-versa.
97*5113495bSYour Name 	 */
98*5113495bSYour Name 	if (qdf_unlikely(vdev->isolation_vdev))
99*5113495bSYour Name 		return false;
100*5113495bSYour Name 
101*5113495bSYour Name 	/*
102*5113495bSYour Name 	 * if the received pkts src mac addr matches with the
103*5113495bSYour Name 	 * wired PCs MAC addr which is behind the STA or with
104*5113495bSYour Name 	 * wireless STAs MAC addr which are behind the Repeater,
105*5113495bSYour Name 	 * then drop the pkt as it is looped back
106*5113495bSYour Name 	 */
107*5113495bSYour Name 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
108*5113495bSYour Name 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
109*5113495bSYour Name 
110*5113495bSYour Name 		if ((sa_idx < 0) ||
111*5113495bSYour Name 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
112*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
113*5113495bSYour Name 				  "invalid sa_idx: %d", sa_idx);
114*5113495bSYour Name 			qdf_assert_always(0);
115*5113495bSYour Name 		}
116*5113495bSYour Name 
117*5113495bSYour Name 		qdf_spin_lock_bh(&soc->ast_lock);
118*5113495bSYour Name 		ase = soc->ast_table[sa_idx];
119*5113495bSYour Name 
120*5113495bSYour Name 		/*
121*5113495bSYour Name 		 * this check was not needed since MEC is not dependent on AST,
122*5113495bSYour Name 		 * but if we dont have this check SON has some issues in
123*5113495bSYour Name 		 * dual backhaul scenario. in APS SON mode, client connected
124*5113495bSYour Name 		 * to RE 2G and sends multicast packets. the RE sends it to CAP
125*5113495bSYour Name 		 * over 5G backhaul. the CAP loopback it on 2G to RE.
126*5113495bSYour Name 		 * On receiving in 2G STA vap, we assume that client has roamed
127*5113495bSYour Name 		 * and kickout the client.
128*5113495bSYour Name 		 */
129*5113495bSYour Name 		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
130*5113495bSYour Name 			qdf_spin_unlock_bh(&soc->ast_lock);
131*5113495bSYour Name 			goto drop;
132*5113495bSYour Name 		}
133*5113495bSYour Name 
134*5113495bSYour Name 		qdf_spin_unlock_bh(&soc->ast_lock);
135*5113495bSYour Name 	}
136*5113495bSYour Name 
137*5113495bSYour Name 	qdf_spin_lock_bh(&soc->mec_lock);
138*5113495bSYour Name 
139*5113495bSYour Name 	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
140*5113495bSYour Name 						   &data[QDF_MAC_ADDR_SIZE]);
141*5113495bSYour Name 	if (!mecentry) {
142*5113495bSYour Name 		qdf_spin_unlock_bh(&soc->mec_lock);
143*5113495bSYour Name 		return false;
144*5113495bSYour Name 	}
145*5113495bSYour Name 
146*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->mec_lock);
147*5113495bSYour Name 
148*5113495bSYour Name drop:
149*5113495bSYour Name 	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
150*5113495bSYour Name 		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
151*5113495bSYour Name 
152*5113495bSYour Name 	return true;
153*5113495bSYour Name }
154*5113495bSYour Name #endif
155*5113495bSYour Name #endif /* QCA_HOST_MODE_WIFI_DISABLED */
156*5113495bSYour Name 
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info)
{
	struct hal_buf_info cur_info = { 0 };

	/* Extract the physical address of the link descriptor referenced
	 * by this ring entry for the duplicate-refill check.
	 */
	hal_rx_buffer_addr_info_get_paddr(ring_buf_info, &cur_info);

	/*
	 * TODO - Check if the hal soc api call can be removed
	 * since the cookie is just used for print.
	 * buffer_addr_info is the first element of ring_desc
	 */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_buf_info,
				  &cur_info);

	/* Same paddr as the previously refilled descriptor => duplicate */
	if (qdf_unlikely(buf_info->paddr == cur_info.paddr)) {
		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
			   cur_info.paddr, cur_info.sw_cookie);
		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
	}

	/* Remember this descriptor for the next duplicate check */
	*buf_info = cur_info;
}
186*5113495bSYour Name 
/**
 * dp_rx_link_desc_return_by_addr() - Return a used MSDU link descriptor to
 *				      the WBM idle list through the SW2WBM
 *				      release ring
 * @soc: core txrx main context
 * @link_desc_addr: opaque buffer address info of the link descriptor
 * @bm_action: buffer-manager action to program into the release descriptor
 *	       (e.g. HAL_BM_ACTION_PUT_IN_IDLE_LIST)
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor was queued to the WBM
 *	   release ring, QDF_STATUS_E_FAILURE otherwise (ring missing,
 *	   ring access failure, or ring full)
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
		return status;
	}

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.wbm_rel_link_desc,
				link_desc_addr);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
			      soc, wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM)*/
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		/* Release ring full: dump ring pointers for debug and bug out,
		 * since losing a link descriptor permanently leaks it.
		 */
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	/* access_end must be called on both success and failure paths */
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;

}
247*5113495bSYour Name 
248*5113495bSYour Name qdf_export_symbol(dp_rx_link_desc_return_by_addr);
249*5113495bSYour Name 
250*5113495bSYour Name QDF_STATUS
dp_rx_link_desc_return(struct dp_soc * soc,hal_ring_desc_t ring_desc,uint8_t bm_action)251*5113495bSYour Name dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
252*5113495bSYour Name 		       uint8_t bm_action)
253*5113495bSYour Name {
254*5113495bSYour Name 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
255*5113495bSYour Name 
256*5113495bSYour Name 	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
257*5113495bSYour Name }
258*5113495bSYour Name 
259*5113495bSYour Name #ifndef QCA_HOST_MODE_WIFI_DISABLED
260*5113495bSYour Name 
261*5113495bSYour Name /**
262*5113495bSYour Name  * dp_rx_msdus_drop() - Drops all MSDU's per MPDU
263*5113495bSYour Name  *
264*5113495bSYour Name  * @soc: core txrx main context
265*5113495bSYour Name  * @ring_desc: opaque pointer to the REO error ring descriptor
266*5113495bSYour Name  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
267*5113495bSYour Name  * @mac_id: mac ID
268*5113495bSYour Name  * @quota: No. of units (packets) that can be serviced in one shot.
269*5113495bSYour Name  *
270*5113495bSYour Name  * This function is used to drop all MSDU in an MPDU
271*5113495bSYour Name  *
272*5113495bSYour Name  * Return: uint32_t: No. of elements processed
273*5113495bSYour Name  */
274*5113495bSYour Name static uint32_t
dp_rx_msdus_drop(struct dp_soc * soc,hal_ring_desc_t ring_desc,struct hal_rx_mpdu_desc_info * mpdu_desc_info,uint8_t * mac_id,uint32_t quota)275*5113495bSYour Name dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
276*5113495bSYour Name 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
277*5113495bSYour Name 		 uint8_t *mac_id,
278*5113495bSYour Name 		 uint32_t quota)
279*5113495bSYour Name {
280*5113495bSYour Name 	uint32_t rx_bufs_used = 0;
281*5113495bSYour Name 	void *link_desc_va;
282*5113495bSYour Name 	struct hal_buf_info buf_info;
283*5113495bSYour Name 	struct dp_pdev *pdev;
284*5113495bSYour Name 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
285*5113495bSYour Name 	int i;
286*5113495bSYour Name 	uint8_t *rx_tlv_hdr;
287*5113495bSYour Name 	uint32_t tid;
288*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
289*5113495bSYour Name 	struct dp_rx_desc *rx_desc;
290*5113495bSYour Name 	/* First field in REO Dst ring Desc is buffer_addr_info */
291*5113495bSYour Name 	void *buf_addr_info = ring_desc;
292*5113495bSYour Name 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
293*5113495bSYour Name 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
294*5113495bSYour Name 
295*5113495bSYour Name 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
296*5113495bSYour Name 
297*5113495bSYour Name 	/* buffer_addr_info is the first element of ring_desc */
298*5113495bSYour Name 	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
299*5113495bSYour Name 				  (uint32_t *)ring_desc,
300*5113495bSYour Name 				  &buf_info);
301*5113495bSYour Name 
302*5113495bSYour Name 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
303*5113495bSYour Name 	if (!link_desc_va) {
304*5113495bSYour Name 		dp_rx_err_debug("link desc va is null, soc %pk", soc);
305*5113495bSYour Name 		return rx_bufs_used;
306*5113495bSYour Name 	}
307*5113495bSYour Name 
308*5113495bSYour Name more_msdu_link_desc:
309*5113495bSYour Name 	/* No UNMAP required -- this is "malloc_consistent" memory */
310*5113495bSYour Name 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
311*5113495bSYour Name 			     &mpdu_desc_info->msdu_count);
312*5113495bSYour Name 
313*5113495bSYour Name 	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
314*5113495bSYour Name 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
315*5113495bSYour Name 						soc, msdu_list.sw_cookie[i]);
316*5113495bSYour Name 
317*5113495bSYour Name 		qdf_assert_always(rx_desc);
318*5113495bSYour Name 
319*5113495bSYour Name 		/* all buffers from a MSDU link link belong to same pdev */
320*5113495bSYour Name 		*mac_id = rx_desc->pool_id;
321*5113495bSYour Name 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
322*5113495bSYour Name 		if (!pdev) {
323*5113495bSYour Name 			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
324*5113495bSYour Name 					soc, rx_desc->pool_id);
325*5113495bSYour Name 			return rx_bufs_used;
326*5113495bSYour Name 		}
327*5113495bSYour Name 
328*5113495bSYour Name 		if (!dp_rx_desc_check_magic(rx_desc)) {
329*5113495bSYour Name 			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
330*5113495bSYour Name 				      soc, msdu_list.sw_cookie[i]);
331*5113495bSYour Name 			return rx_bufs_used;
332*5113495bSYour Name 		}
333*5113495bSYour Name 
334*5113495bSYour Name 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
335*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
336*5113495bSYour Name 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
337*5113495bSYour Name 		rx_desc->unmapped = 1;
338*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
339*5113495bSYour Name 
340*5113495bSYour Name 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
341*5113495bSYour Name 
342*5113495bSYour Name 		rx_bufs_used++;
343*5113495bSYour Name 		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
344*5113495bSYour Name 						rx_desc->rx_buf_start);
345*5113495bSYour Name 		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
346*5113495bSYour Name 			      soc, tid);
347*5113495bSYour Name 
348*5113495bSYour Name 		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
349*5113495bSYour Name 		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
350*5113495bSYour Name 			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
351*5113495bSYour Name 
352*5113495bSYour Name 		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
353*5113495bSYour Name 				      rx_desc->nbuf,
354*5113495bSYour Name 				      QDF_TX_RX_STATUS_DROP, true);
355*5113495bSYour Name 		/* Just free the buffers */
356*5113495bSYour Name 		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
357*5113495bSYour Name 
358*5113495bSYour Name 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
359*5113495bSYour Name 					    &pdev->free_list_tail, rx_desc);
360*5113495bSYour Name 	}
361*5113495bSYour Name 
362*5113495bSYour Name 	/*
363*5113495bSYour Name 	 * If the msdu's are spread across multiple link-descriptors,
364*5113495bSYour Name 	 * we cannot depend solely on the msdu_count(e.g., if msdu is
365*5113495bSYour Name 	 * spread across multiple buffers).Hence, it is
366*5113495bSYour Name 	 * necessary to check the next link_descriptor and release
367*5113495bSYour Name 	 * all the msdu's that are part of it.
368*5113495bSYour Name 	 */
369*5113495bSYour Name 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
370*5113495bSYour Name 			link_desc_va,
371*5113495bSYour Name 			&next_link_desc_addr_info);
372*5113495bSYour Name 
373*5113495bSYour Name 	if (hal_rx_is_buf_addr_info_valid(
374*5113495bSYour Name 				&next_link_desc_addr_info)) {
375*5113495bSYour Name 		/* Clear the next link desc info for the current link_desc */
376*5113495bSYour Name 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
377*5113495bSYour Name 
378*5113495bSYour Name 		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
379*5113495bSYour Name 					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
380*5113495bSYour Name 		hal_rx_buffer_addr_info_get_paddr(
381*5113495bSYour Name 				&next_link_desc_addr_info,
382*5113495bSYour Name 				&buf_info);
383*5113495bSYour Name 		/* buffer_addr_info is the first element of ring_desc */
384*5113495bSYour Name 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
385*5113495bSYour Name 					  (uint32_t *)&next_link_desc_addr_info,
386*5113495bSYour Name 					  &buf_info);
387*5113495bSYour Name 		cur_link_desc_addr_info = next_link_desc_addr_info;
388*5113495bSYour Name 		buf_addr_info = &cur_link_desc_addr_info;
389*5113495bSYour Name 
390*5113495bSYour Name 		link_desc_va =
391*5113495bSYour Name 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
392*5113495bSYour Name 
393*5113495bSYour Name 		goto more_msdu_link_desc;
394*5113495bSYour Name 	}
395*5113495bSYour Name 	quota--;
396*5113495bSYour Name 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
397*5113495bSYour Name 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
398*5113495bSYour Name 	return rx_bufs_used;
399*5113495bSYour Name }
400*5113495bSYour Name 
401*5113495bSYour Name /**
402*5113495bSYour Name  * dp_rx_pn_error_handle() - Handles PN check errors
403*5113495bSYour Name  *
404*5113495bSYour Name  * @soc: core txrx main context
405*5113495bSYour Name  * @ring_desc: opaque pointer to the REO error ring descriptor
406*5113495bSYour Name  * @mpdu_desc_info: MPDU descriptor information from ring descriptor
407*5113495bSYour Name  * @mac_id: mac ID
408*5113495bSYour Name  * @quota: No. of units (packets) that can be serviced in one shot.
409*5113495bSYour Name  *
410*5113495bSYour Name  * This function implements PN error handling
411*5113495bSYour Name  * If the peer is configured to ignore the PN check errors
412*5113495bSYour Name  * or if DP feels, that this frame is still OK, the frame can be
413*5113495bSYour Name  * re-injected back to REO to use some of the other features
414*5113495bSYour Name  * of REO e.g. duplicate detection/routing to other cores
415*5113495bSYour Name  *
416*5113495bSYour Name  * Return: uint32_t: No. of elements processed
417*5113495bSYour Name  */
418*5113495bSYour Name static uint32_t
dp_rx_pn_error_handle(struct dp_soc * soc,hal_ring_desc_t ring_desc,struct hal_rx_mpdu_desc_info * mpdu_desc_info,uint8_t * mac_id,uint32_t quota)419*5113495bSYour Name dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
420*5113495bSYour Name 		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
421*5113495bSYour Name 		      uint8_t *mac_id,
422*5113495bSYour Name 		      uint32_t quota)
423*5113495bSYour Name {
424*5113495bSYour Name 	uint16_t peer_id;
425*5113495bSYour Name 	uint32_t rx_bufs_used = 0;
426*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer;
427*5113495bSYour Name 	bool peer_pn_policy = false;
428*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
429*5113495bSYour Name 
430*5113495bSYour Name 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
431*5113495bSYour Name 					       mpdu_desc_info->peer_meta_data);
432*5113495bSYour Name 
433*5113495bSYour Name 
434*5113495bSYour Name 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
435*5113495bSYour Name 						   &txrx_ref_handle,
436*5113495bSYour Name 						   DP_MOD_ID_RX_ERR);
437*5113495bSYour Name 
438*5113495bSYour Name 	if (qdf_likely(txrx_peer)) {
439*5113495bSYour Name 		/*
440*5113495bSYour Name 		 * TODO: Check for peer specific policies & set peer_pn_policy
441*5113495bSYour Name 		 */
442*5113495bSYour Name 		dp_err_rl("discard rx due to PN error for peer  %pK",
443*5113495bSYour Name 			  txrx_peer);
444*5113495bSYour Name 
445*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
446*5113495bSYour Name 	}
447*5113495bSYour Name 	dp_rx_err_err("%pK: Packet received with PN error", soc);
448*5113495bSYour Name 
449*5113495bSYour Name 	/* No peer PN policy -- definitely drop */
450*5113495bSYour Name 	if (!peer_pn_policy)
451*5113495bSYour Name 		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
452*5113495bSYour Name 						mpdu_desc_info,
453*5113495bSYour Name 						mac_id, quota);
454*5113495bSYour Name 
455*5113495bSYour Name 	return rx_bufs_used;
456*5113495bSYour Name }
457*5113495bSYour Name 
458*5113495bSYour Name #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
459*5113495bSYour Name /**
460*5113495bSYour Name  * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
461*5113495bSYour Name  * @soc: Datapath soc handler
462*5113495bSYour Name  * @txrx_peer: pointer to DP peer
463*5113495bSYour Name  * @nbuf: pointer to the skb of RX frame
464*5113495bSYour Name  * @frame_mask: the mask for special frame needed
465*5113495bSYour Name  * @rx_tlv_hdr: start of rx tlv header
466*5113495bSYour Name  *
467*5113495bSYour Name  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
468*5113495bSYour Name  * single nbuf is expected.
469*5113495bSYour Name  *
470*5113495bSYour Name  * return: true - nbuf has been delivered to stack, false - not.
471*5113495bSYour Name  */
472*5113495bSYour Name static bool
dp_rx_deliver_oor_frame(struct dp_soc * soc,struct dp_txrx_peer * txrx_peer,qdf_nbuf_t nbuf,uint32_t frame_mask,uint8_t * rx_tlv_hdr)473*5113495bSYour Name dp_rx_deliver_oor_frame(struct dp_soc *soc,
474*5113495bSYour Name 			struct dp_txrx_peer *txrx_peer,
475*5113495bSYour Name 			qdf_nbuf_t nbuf, uint32_t frame_mask,
476*5113495bSYour Name 			uint8_t *rx_tlv_hdr)
477*5113495bSYour Name {
478*5113495bSYour Name 	uint32_t l2_hdr_offset = 0;
479*5113495bSYour Name 	uint16_t msdu_len = 0;
480*5113495bSYour Name 	uint32_t skip_len;
481*5113495bSYour Name 
482*5113495bSYour Name 	l2_hdr_offset =
483*5113495bSYour Name 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
484*5113495bSYour Name 
485*5113495bSYour Name 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
486*5113495bSYour Name 		skip_len = l2_hdr_offset;
487*5113495bSYour Name 	} else {
488*5113495bSYour Name 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
489*5113495bSYour Name 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
490*5113495bSYour Name 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
491*5113495bSYour Name 	}
492*5113495bSYour Name 
493*5113495bSYour Name 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
494*5113495bSYour Name 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
495*5113495bSYour Name 	qdf_nbuf_pull_head(nbuf, skip_len);
496*5113495bSYour Name 	qdf_nbuf_set_exc_frame(nbuf, 1);
497*5113495bSYour Name 
498*5113495bSYour Name 	dp_info_rl("OOR frame, mpdu sn 0x%x",
499*5113495bSYour Name 		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
500*5113495bSYour Name 	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
501*5113495bSYour Name 	return true;
502*5113495bSYour Name }
503*5113495bSYour Name 
504*5113495bSYour Name #else
/**
 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
 * @soc: Datapath soc handler
 * @txrx_peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Default variant (DP_RX_DELIVER_ALL_OOR_FRAMES disabled): forwards to
 * dp_rx_deliver_special_frame(), so only frames matching @frame_mask are
 * delivered.
 *
 * return: true - nbuf has been delivered to stack, false - not.
 */
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf, uint32_t frame_mask,
			uint8_t *rx_tlv_hdr)
{
	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					   rx_tlv_hdr);
}
514*5113495bSYour Name #endif
515*5113495bSYour Name 
516*5113495bSYour Name /**
517*5113495bSYour Name  * dp_rx_oor_handle() - Handles the msdu which is OOR error
518*5113495bSYour Name  *
519*5113495bSYour Name  * @soc: core txrx main context
520*5113495bSYour Name  * @nbuf: pointer to msdu skb
521*5113495bSYour Name  * @peer_id: dp peer ID
522*5113495bSYour Name  * @rx_tlv_hdr: start of rx tlv header
523*5113495bSYour Name  *
524*5113495bSYour Name  * This function process the msdu delivered from REO2TCL
525*5113495bSYour Name  * ring with error type OOR
526*5113495bSYour Name  *
527*5113495bSYour Name  * Return: None
528*5113495bSYour Name  */
529*5113495bSYour Name static void
dp_rx_oor_handle(struct dp_soc * soc,qdf_nbuf_t nbuf,uint16_t peer_id,uint8_t * rx_tlv_hdr)530*5113495bSYour Name dp_rx_oor_handle(struct dp_soc *soc,
531*5113495bSYour Name 		 qdf_nbuf_t nbuf,
532*5113495bSYour Name 		 uint16_t peer_id,
533*5113495bSYour Name 		 uint8_t *rx_tlv_hdr)
534*5113495bSYour Name {
535*5113495bSYour Name 	uint32_t frame_mask = wlan_cfg_get_special_frame_cfg(soc->wlan_cfg_ctx);
536*5113495bSYour Name 
537*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
538*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
539*5113495bSYour Name 
540*5113495bSYour Name 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
541*5113495bSYour Name 						   &txrx_ref_handle,
542*5113495bSYour Name 						   DP_MOD_ID_RX_ERR);
543*5113495bSYour Name 	if (!txrx_peer) {
544*5113495bSYour Name 		dp_info_rl("peer not found");
545*5113495bSYour Name 		goto free_nbuf;
546*5113495bSYour Name 	}
547*5113495bSYour Name 
548*5113495bSYour Name 	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
549*5113495bSYour Name 				    rx_tlv_hdr)) {
550*5113495bSYour Name 		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
551*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
552*5113495bSYour Name 		return;
553*5113495bSYour Name 	}
554*5113495bSYour Name 
555*5113495bSYour Name free_nbuf:
556*5113495bSYour Name 	if (txrx_peer)
557*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
558*5113495bSYour Name 
559*5113495bSYour Name 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
560*5113495bSYour Name 	dp_rx_nbuf_free(nbuf);
561*5113495bSYour Name }
562*5113495bSYour Name 
563*5113495bSYour Name /**
564*5113495bSYour Name  * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
565*5113495bSYour Name  *				is a monotonous increment of packet number
566*5113495bSYour Name  *				from the previous successfully re-ordered
567*5113495bSYour Name  *				frame.
568*5113495bSYour Name  * @soc: Datapath SOC handle
569*5113495bSYour Name  * @ring_desc: REO ring descriptor
570*5113495bSYour Name  * @nbuf: Current packet
571*5113495bSYour Name  *
572*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
573*5113495bSYour Name  */
574*5113495bSYour Name static inline QDF_STATUS
dp_rx_err_nbuf_pn_check(struct dp_soc * soc,hal_ring_desc_t ring_desc,qdf_nbuf_t nbuf)575*5113495bSYour Name dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
576*5113495bSYour Name 			qdf_nbuf_t nbuf)
577*5113495bSYour Name {
578*5113495bSYour Name 	uint64_t prev_pn, curr_pn[2];
579*5113495bSYour Name 
580*5113495bSYour Name 	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
581*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
582*5113495bSYour Name 
583*5113495bSYour Name 	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
584*5113495bSYour Name 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
585*5113495bSYour Name 
586*5113495bSYour Name 	if (curr_pn[0] > prev_pn)
587*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
588*5113495bSYour Name 
589*5113495bSYour Name 	return QDF_STATUS_E_FAILURE;
590*5113495bSYour Name }
591*5113495bSYour Name 
#ifdef WLAN_SKIP_BAR_UPDATE
/**
 * dp_rx_err_handle_bar() - Stub used when BAR-driven reorder window
 *			    updates are compiled out
 * @soc: core DP soc context
 * @peer: dp peer handle (unused in this variant)
 * @nbuf: BAR frame buffer (unused in this variant)
 *
 * Only logs and bumps the failure counter; no rx tid update happens.
 *
 * Return: None
 */
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	dp_info_rl("BAR update to H.W is skipped");
	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
/**
 * dp_rx_err_handle_bar() - Parse an error BAR frame and update the
 *			    peer's rx tid with the new start sequence
 *			    number (SSN)
 * @soc: core DP soc context
 * @peer: dp peer the BAR frame belongs to
 * @nbuf: buffer holding rx TLVs followed by the 802.11 BAR frame
 *
 * Return: None
 */
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	QDF_STATUS status;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame. If not Discard it.
	 * 2. If it is, get the peer id, tid, ssn
	 * 2a Do a tid update
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	/* The 802.11 header sits immediately after the rx TLVs */
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	/* SSN is in the upper bits of the little-endian sequence field */
	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	status = dp_rx_tid_update_wifi3(peer, tid,
					peer->rx_tid[tid].ba_win_size,
					start_seq_num,
					true);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err_rl("failed to handle bar frame update rx tid");
		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
	} else {
		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
	}
}
#endif
652*5113495bSYour Name 
653*5113495bSYour Name /**
654*5113495bSYour Name  * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
655*5113495bSYour Name  * @soc: Datapath SoC handle
656*5113495bSYour Name  * @nbuf: packet being processed
657*5113495bSYour Name  * @mpdu_desc_info: mpdu desc info for the current packet
658*5113495bSYour Name  * @tid: tid on which the packet arrived
659*5113495bSYour Name  * @err_status: Flag to indicate if REO encountered an error while routing this
660*5113495bSYour Name  *		frame
661*5113495bSYour Name  * @error_code: REO error code
662*5113495bSYour Name  *
663*5113495bSYour Name  * Return: None
664*5113495bSYour Name  */
665*5113495bSYour Name static void
_dp_rx_bar_frame_handle(struct dp_soc * soc,qdf_nbuf_t nbuf,struct hal_rx_mpdu_desc_info * mpdu_desc_info,uint32_t tid,uint8_t err_status,uint32_t error_code)666*5113495bSYour Name _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
667*5113495bSYour Name 			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
668*5113495bSYour Name 			uint32_t tid, uint8_t err_status, uint32_t error_code)
669*5113495bSYour Name {
670*5113495bSYour Name 	uint16_t peer_id;
671*5113495bSYour Name 	struct dp_peer *peer;
672*5113495bSYour Name 
673*5113495bSYour Name 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
674*5113495bSYour Name 					       mpdu_desc_info->peer_meta_data);
675*5113495bSYour Name 	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
676*5113495bSYour Name 	if (!peer)
677*5113495bSYour Name 		return;
678*5113495bSYour Name 
679*5113495bSYour Name 	dp_info_rl("BAR frame: "
680*5113495bSYour Name 		" peer_id = %d"
681*5113495bSYour Name 		" tid = %u"
682*5113495bSYour Name 		" SSN = %d"
683*5113495bSYour Name 		" error status = %d",
684*5113495bSYour Name 		peer->peer_id,
685*5113495bSYour Name 		tid,
686*5113495bSYour Name 		mpdu_desc_info->mpdu_seq,
687*5113495bSYour Name 		err_status);
688*5113495bSYour Name 
689*5113495bSYour Name 	if (err_status == HAL_REO_ERROR_DETECTED) {
690*5113495bSYour Name 		switch (error_code) {
691*5113495bSYour Name 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
692*5113495bSYour Name 		case HAL_REO_ERR_BAR_FRAME_OOR:
693*5113495bSYour Name 			dp_rx_err_handle_bar(soc, peer, nbuf);
694*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
695*5113495bSYour Name 			break;
696*5113495bSYour Name 		default:
697*5113495bSYour Name 			DP_STATS_INC(soc, rx.bar_frame, 1);
698*5113495bSYour Name 		}
699*5113495bSYour Name 	}
700*5113495bSYour Name 
701*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
702*5113495bSYour Name }
703*5113495bSYour Name 
704*5113495bSYour Name /**
705*5113495bSYour Name  * dp_rx_bar_frame_handle() - Function to handle err BAR frames
706*5113495bSYour Name  * @soc: core DP main context
707*5113495bSYour Name  * @ring_desc: Hal ring desc
708*5113495bSYour Name  * @rx_desc: dp rx desc
709*5113495bSYour Name  * @mpdu_desc_info: mpdu desc info
710*5113495bSYour Name  * @err_status: error status
711*5113495bSYour Name  * @err_code: error code
712*5113495bSYour Name  *
713*5113495bSYour Name  * Handle the error BAR frames received. Ensure the SOC level
714*5113495bSYour Name  * stats are updated based on the REO error code. The BAR frames
715*5113495bSYour Name  * are further processed by updating the Rx tids with the start
716*5113495bSYour Name  * sequence number (SSN) and BA window size. Desc is returned
717*5113495bSYour Name  * to the free desc list
718*5113495bSYour Name  *
719*5113495bSYour Name  * Return: none
720*5113495bSYour Name  */
721*5113495bSYour Name static void
dp_rx_bar_frame_handle(struct dp_soc * soc,hal_ring_desc_t ring_desc,struct dp_rx_desc * rx_desc,struct hal_rx_mpdu_desc_info * mpdu_desc_info,uint8_t err_status,uint32_t err_code)722*5113495bSYour Name dp_rx_bar_frame_handle(struct dp_soc *soc,
723*5113495bSYour Name 		       hal_ring_desc_t ring_desc,
724*5113495bSYour Name 		       struct dp_rx_desc *rx_desc,
725*5113495bSYour Name 		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
726*5113495bSYour Name 		       uint8_t err_status,
727*5113495bSYour Name 		       uint32_t err_code)
728*5113495bSYour Name {
729*5113495bSYour Name 	qdf_nbuf_t nbuf;
730*5113495bSYour Name 	struct dp_pdev *pdev;
731*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
732*5113495bSYour Name 	uint8_t *rx_tlv_hdr;
733*5113495bSYour Name 	uint32_t tid;
734*5113495bSYour Name 
735*5113495bSYour Name 	nbuf = rx_desc->nbuf;
736*5113495bSYour Name 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
737*5113495bSYour Name 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
738*5113495bSYour Name 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
739*5113495bSYour Name 	rx_desc->unmapped = 1;
740*5113495bSYour Name 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
741*5113495bSYour Name 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
742*5113495bSYour Name 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
743*5113495bSYour Name 					rx_tlv_hdr);
744*5113495bSYour Name 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
745*5113495bSYour Name 
746*5113495bSYour Name 	if (!pdev) {
747*5113495bSYour Name 		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
748*5113495bSYour Name 				soc, rx_desc->pool_id);
749*5113495bSYour Name 		return;
750*5113495bSYour Name 	}
751*5113495bSYour Name 
752*5113495bSYour Name 	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
753*5113495bSYour Name 				err_code);
754*5113495bSYour Name 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
755*5113495bSYour Name 			      QDF_TX_RX_STATUS_DROP, true);
756*5113495bSYour Name 	dp_rx_link_desc_return(soc, ring_desc,
757*5113495bSYour Name 			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
758*5113495bSYour Name 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
759*5113495bSYour Name 				    rx_desc->pool_id);
760*5113495bSYour Name 	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
761*5113495bSYour Name 				    &pdev->free_list_tail,
762*5113495bSYour Name 				    rx_desc);
763*5113495bSYour Name }
764*5113495bSYour Name 
765*5113495bSYour Name #endif /* QCA_HOST_MODE_WIFI_DISABLED */
766*5113495bSYour Name 
/**
 * dp_2k_jump_handle() - Handle a frame flagged with the REO 2K-jump error
 * @soc: core DP main context
 * @nbuf: packet that hit the 2K-jump condition
 * @rx_tlv_hdr: start of the rx TLV header for @nbuf
 * @peer_id: peer id extracted from the ring descriptor
 * @tid: tid on which the packet arrived
 *
 * If a BA session is active on the tid, a DELBA is (re)sent to tear the
 * session down; the frame itself is then either delivered to the stack
 * (only frame types in @frame_mask, e.g. ARP) or dropped.
 */
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_txrx_peer *txrx_peer;
	/* Only ARP frames are eligible for stack delivery on this path */
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer) {
		dp_rx_err_info_rl("%pK: peer not found", soc);
		goto free_nbuf;
	}

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer) {
		dp_rx_err_info_rl("%pK: txrx_peer not found", soc);
		goto free_nbuf;
	}

	/* Invalid tid: skip the DELBA logic but still try to deliver */
	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* only if BA session is active, allow send Delba */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		/* Mark DELBA in flight and record the reason code while
		 * still holding the tid lock
		 */
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		/* Drop the tid lock before calling into the control path */
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
				     1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode,
					CDP_DELBA_2K_JUMP);
		}
	} else {
		/* DELBA already pending for this tid; nothing more to send */
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	/* On successful delivery the nbuf is consumed by the stack path */
	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	dp_rx_nbuf_free(nbuf);
}
836*5113495bSYour Name 
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
    defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - WAR for frames whose
 *	sw_peer_id/ast_idx in the rx TLVs is corrupted
 * @soc: Datapath SOC handle
 * @pool_id: rx desc pool id the frame came from (maps to a pdev)
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: the received frame
 *
 * Tries to find the peer by addr2 of the received MPDU instead of the
 * (untrusted) peer id. If such a peer exists the frame is counted as an
 * invalid-peer-id error and freed here.
 *
 * Return: true if the frame was consumed (freed) by this WAR,
 *	false if the caller still owns @nbuf.
 */
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, pool_id);
		return false;
	}
	/*
	 * WAR- In certain types of packets if peer_id is not correct then
	 * driver may not be able find. Try finding peer by addr_2 of
	 * received MPDU
	 */
	if (wh)
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		/* Frame is accounted and dropped here; release our ref */
		dp_rx_nbuf_free(nbuf);

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return true;
	}
	return false;
}
#else
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - No-op variant for
 *	targets that do not need the corrupted-peer-id WAR
 *
 * Return: always false (frame ownership stays with the caller)
 */
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}
#endif
886*5113495bSYour Name 
dp_rx_check_pkt_len(struct dp_soc * soc,uint32_t pkt_len)887*5113495bSYour Name bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
888*5113495bSYour Name {
889*5113495bSYour Name 	uint16_t buf_size;
890*5113495bSYour Name 
891*5113495bSYour Name 	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
892*5113495bSYour Name 
893*5113495bSYour Name 	if (qdf_unlikely(pkt_len > buf_size)) {
894*5113495bSYour Name 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
895*5113495bSYour Name 				 1, pkt_len);
896*5113495bSYour Name 		return true;
897*5113495bSYour Name 	} else {
898*5113495bSYour Name 		return false;
899*5113495bSYour Name 	}
900*5113495bSYour Name }
901*5113495bSYour Name 
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_rx_deliver_to_osif_stack() - Deliver an rx frame to the OS stack
 * @soc: Datapath SOC handle
 * @vdev: vdev the frame belongs to
 * @txrx_peer: txrx peer handle
 * @nbuf: frame to deliver
 * @tail: tail of the nbuf list (currently unused by this function)
 * @is_eapol: true if @nbuf is an EAPOL frame
 *
 * EAPOL frames take the dedicated control-port delivery path when the
 * SOC has eapol_over_control_port enabled; all other frames use the
 * regular stack delivery.
 *
 * Return: None
 */
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	if (is_eapol && soc->eapol_over_control_port)
		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
	else
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
/**
 * dp_rx_deliver_to_osif_stack() - Deliver an rx frame to the OS stack
 * @soc: Datapath SOC handle
 * @vdev: vdev the frame belongs to
 * @txrx_peer: txrx peer handle
 * @nbuf: frame to deliver
 * @tail: tail of the nbuf list (unused)
 * @is_eapol: unused; EAPOL-over-control-port support is compiled out
 *
 * Always uses the regular stack delivery path.
 *
 * Return: None
 */
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif
928*5113495bSYour Name 
929*5113495bSYour Name #ifdef WLAN_FEATURE_11BE_MLO
dp_rx_err_match_dhost(qdf_ether_header_t * eh,struct dp_vdev * vdev)930*5113495bSYour Name int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
931*5113495bSYour Name {
932*5113495bSYour Name 	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
933*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE) == 0) ||
934*5113495bSYour Name 		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
935*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE) == 0));
936*5113495bSYour Name }
937*5113495bSYour Name 
938*5113495bSYour Name #else
dp_rx_err_match_dhost(qdf_ether_header_t * eh,struct dp_vdev * vdev)939*5113495bSYour Name int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
940*5113495bSYour Name {
941*5113495bSYour Name 	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
942*5113495bSYour Name 			    QDF_MAC_ADDR_SIZE) == 0);
943*5113495bSYour Name }
944*5113495bSYour Name #endif
945*5113495bSYour Name 
946*5113495bSYour Name #ifndef QCA_HOST_MODE_WIFI_DISABLED
947*5113495bSYour Name 
948*5113495bSYour Name bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev * vdev,uint8_t * rx_tlv_hdr)949*5113495bSYour Name dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
950*5113495bSYour Name {
951*5113495bSYour Name 	struct dp_soc *soc = vdev->pdev->soc;
952*5113495bSYour Name 
953*5113495bSYour Name 	if (!vdev->drop_3addr_mcast)
954*5113495bSYour Name 		return false;
955*5113495bSYour Name 
956*5113495bSYour Name 	if (vdev->opmode != wlan_op_mode_sta)
957*5113495bSYour Name 		return false;
958*5113495bSYour Name 
959*5113495bSYour Name 	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
960*5113495bSYour Name 		return true;
961*5113495bSYour Name 
962*5113495bSYour Name 	return false;
963*5113495bSYour Name }
964*5113495bSYour Name 
965*5113495bSYour Name /**
966*5113495bSYour Name  * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
967*5113495bSYour Name  *				for this frame received in REO error ring.
968*5113495bSYour Name  * @soc: Datapath SOC handle
969*5113495bSYour Name  * @error: REO error detected or not
970*5113495bSYour Name  * @error_code: Error code in case of REO error
971*5113495bSYour Name  *
972*5113495bSYour Name  * Return: true if pn check if needed in software,
973*5113495bSYour Name  *	false, if pn check if not needed.
974*5113495bSYour Name  */
975*5113495bSYour Name static inline bool
dp_rx_err_is_pn_check_needed(struct dp_soc * soc,uint8_t error,uint32_t error_code)976*5113495bSYour Name dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
977*5113495bSYour Name 			     uint32_t error_code)
978*5113495bSYour Name {
979*5113495bSYour Name 	return (soc->features.pn_in_reo_dest &&
980*5113495bSYour Name 		(error == HAL_REO_ERROR_DETECTED &&
981*5113495bSYour Name 		 (hal_rx_reo_is_2k_jump(error_code) ||
982*5113495bSYour Name 		  hal_rx_reo_is_oor_error(error_code) ||
983*5113495bSYour Name 		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
984*5113495bSYour Name }
985*5113495bSYour Name 
986*5113495bSYour Name #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
987*5113495bSYour Name static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc * soc,qdf_nbuf_t nbuf,struct hal_rx_mpdu_desc_info * mpdu_desc_info,bool first_msdu_in_mpdu_processed)988*5113495bSYour Name dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
989*5113495bSYour Name 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
990*5113495bSYour Name 				  bool first_msdu_in_mpdu_processed)
991*5113495bSYour Name {
992*5113495bSYour Name 	if (first_msdu_in_mpdu_processed) {
993*5113495bSYour Name 		/*
994*5113495bSYour Name 		 * This is the 2nd indication of first_msdu in the same mpdu.
995*5113495bSYour Name 		 * Skip re-parsing the mdpu_desc_info and use the cached one,
996*5113495bSYour Name 		 * since this msdu is most probably from the current mpdu
997*5113495bSYour Name 		 * which is being processed
998*5113495bSYour Name 		 */
999*5113495bSYour Name 	} else {
1000*5113495bSYour Name 		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
1001*5113495bSYour Name 						   qdf_nbuf_data(nbuf),
1002*5113495bSYour Name 						   mpdu_desc_info);
1003*5113495bSYour Name 	}
1004*5113495bSYour Name }
1005*5113495bSYour Name #else
1006*5113495bSYour Name static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc * soc,qdf_nbuf_t nbuf,struct hal_rx_mpdu_desc_info * mpdu_desc_info,bool first_msdu_in_mpdu_processed)1007*5113495bSYour Name dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1008*5113495bSYour Name 				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1009*5113495bSYour Name 				  bool first_msdu_in_mpdu_processed)
1010*5113495bSYour Name {
1011*5113495bSYour Name 	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
1012*5113495bSYour Name 					   mpdu_desc_info);
1013*5113495bSYour Name }
1014*5113495bSYour Name #endif
1015*5113495bSYour Name 
1016*5113495bSYour Name /**
1017*5113495bSYour Name  * dp_rx_reo_err_entry_process() - Handles for REO error entry processing
1018*5113495bSYour Name  *
1019*5113495bSYour Name  * @soc: core txrx main context
1020*5113495bSYour Name  * @ring_desc: opaque pointer to the REO error ring descriptor
1021*5113495bSYour Name  * @mpdu_desc_info: pointer to mpdu level description info
1022*5113495bSYour Name  * @link_desc_va: pointer to msdu_link_desc virtual address
1023*5113495bSYour Name  * @err_code: reo error code fetched from ring entry
1024*5113495bSYour Name  *
1025*5113495bSYour Name  * Function to handle msdus fetched from msdu link desc, currently
1026*5113495bSYour Name  * support REO error NULL queue, 2K jump, OOR.
1027*5113495bSYour Name  *
1028*5113495bSYour Name  * Return: msdu count processed
1029*5113495bSYour Name  */
1030*5113495bSYour Name static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc * soc,void * ring_desc,struct hal_rx_mpdu_desc_info * mpdu_desc_info,void * link_desc_va,enum hal_reo_error_code err_code)1031*5113495bSYour Name dp_rx_reo_err_entry_process(struct dp_soc *soc,
1032*5113495bSYour Name 			    void *ring_desc,
1033*5113495bSYour Name 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1034*5113495bSYour Name 			    void *link_desc_va,
1035*5113495bSYour Name 			    enum hal_reo_error_code err_code)
1036*5113495bSYour Name {
1037*5113495bSYour Name 	uint32_t rx_bufs_used = 0;
1038*5113495bSYour Name 	struct dp_pdev *pdev;
1039*5113495bSYour Name 	int i;
1040*5113495bSYour Name 	uint8_t *rx_tlv_hdr_first;
1041*5113495bSYour Name 	uint8_t *rx_tlv_hdr_last;
1042*5113495bSYour Name 	uint32_t tid = DP_MAX_TIDS;
1043*5113495bSYour Name 	uint16_t peer_id;
1044*5113495bSYour Name 	struct dp_rx_desc *rx_desc;
1045*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
1046*5113495bSYour Name 	qdf_nbuf_t nbuf;
1047*5113495bSYour Name 	qdf_nbuf_t next_nbuf;
1048*5113495bSYour Name 	struct hal_buf_info buf_info;
1049*5113495bSYour Name 	struct hal_rx_msdu_list msdu_list;
1050*5113495bSYour Name 	uint16_t num_msdus;
1051*5113495bSYour Name 	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1052*5113495bSYour Name 	struct buffer_addr_info next_link_desc_addr_info = { 0 };
1053*5113495bSYour Name 	/* First field in REO Dst ring Desc is buffer_addr_info */
1054*5113495bSYour Name 	void *buf_addr_info = ring_desc;
1055*5113495bSYour Name 	qdf_nbuf_t head_nbuf = NULL;
1056*5113495bSYour Name 	qdf_nbuf_t tail_nbuf = NULL;
1057*5113495bSYour Name 	uint16_t msdu_processed = 0;
1058*5113495bSYour Name 	QDF_STATUS status;
1059*5113495bSYour Name 	bool ret, is_pn_check_needed;
1060*5113495bSYour Name 	uint8_t rx_desc_pool_id;
1061*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
1062*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1063*5113495bSYour Name 	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1064*5113495bSYour Name 	bool first_msdu_in_mpdu_processed = false;
1065*5113495bSYour Name 	bool msdu_dropped = false;
1066*5113495bSYour Name 	uint8_t link_id = 0;
1067*5113495bSYour Name 
1068*5113495bSYour Name 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1069*5113495bSYour Name 					mpdu_desc_info->peer_meta_data);
1070*5113495bSYour Name 	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1071*5113495bSYour Name 							  HAL_REO_ERROR_DETECTED,
1072*5113495bSYour Name 							  err_code);
1073*5113495bSYour Name more_msdu_link_desc:
1074*5113495bSYour Name 	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1075*5113495bSYour Name 			     &num_msdus);
1076*5113495bSYour Name 	for (i = 0; i < num_msdus; i++) {
1077*5113495bSYour Name 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1078*5113495bSYour Name 						soc,
1079*5113495bSYour Name 						msdu_list.sw_cookie[i]);
1080*5113495bSYour Name 
1081*5113495bSYour Name 		if (dp_assert_always_internal_stat(rx_desc, soc,
1082*5113495bSYour Name 						   rx.err.reo_err_rx_desc_null))
1083*5113495bSYour Name 			continue;
1084*5113495bSYour Name 
1085*5113495bSYour Name 		nbuf = rx_desc->nbuf;
1086*5113495bSYour Name 
1087*5113495bSYour Name 		/*
1088*5113495bSYour Name 		 * this is a unlikely scenario where the host is reaping
1089*5113495bSYour Name 		 * a descriptor which it already reaped just a while ago
1090*5113495bSYour Name 		 * but is yet to replenish it back to HW.
1091*5113495bSYour Name 		 * In this case host will dump the last 128 descriptors
1092*5113495bSYour Name 		 * including the software descriptor rx_desc and assert.
1093*5113495bSYour Name 		 */
1094*5113495bSYour Name 		if (qdf_unlikely(!rx_desc->in_use) ||
1095*5113495bSYour Name 		    qdf_unlikely(!nbuf)) {
1096*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1097*5113495bSYour Name 			dp_info_rl("Reaping rx_desc not in use!");
1098*5113495bSYour Name 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1099*5113495bSYour Name 						   ring_desc, rx_desc);
1100*5113495bSYour Name 			/* ignore duplicate RX desc and continue to process */
1101*5113495bSYour Name 			/* Pop out the descriptor */
1102*5113495bSYour Name 			msdu_dropped = true;
1103*5113495bSYour Name 			continue;
1104*5113495bSYour Name 		}
1105*5113495bSYour Name 
1106*5113495bSYour Name 		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1107*5113495bSYour Name 						    msdu_list.paddr[i]);
1108*5113495bSYour Name 		if (!ret) {
1109*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1110*5113495bSYour Name 			rx_desc->in_err_state = 1;
1111*5113495bSYour Name 			msdu_dropped = true;
1112*5113495bSYour Name 			continue;
1113*5113495bSYour Name 		}
1114*5113495bSYour Name 
1115*5113495bSYour Name 		rx_desc_pool_id = rx_desc->pool_id;
1116*5113495bSYour Name 		/* all buffers from a MSDU link belong to same pdev */
1117*5113495bSYour Name 		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1118*5113495bSYour Name 
1119*5113495bSYour Name 		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1120*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1121*5113495bSYour Name 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1122*5113495bSYour Name 		rx_desc->unmapped = 1;
1123*5113495bSYour Name 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1124*5113495bSYour Name 
1125*5113495bSYour Name 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1126*5113495bSYour Name 		rx_bufs_used++;
1127*5113495bSYour Name 		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1128*5113495bSYour Name 					    &pdev->free_list_tail, rx_desc);
1129*5113495bSYour Name 
1130*5113495bSYour Name 		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1131*5113495bSYour Name 
1132*5113495bSYour Name 		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1133*5113495bSYour Name 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
1134*5113495bSYour Name 			qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
1135*5113495bSYour Name 			continue;
1136*5113495bSYour Name 		}
1137*5113495bSYour Name 
1138*5113495bSYour Name 		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1139*5113495bSYour Name 					     rx_desc_pool_id)) {
1140*5113495bSYour Name 			/* MSDU queued back to the pool */
1141*5113495bSYour Name 			msdu_dropped = true;
1142*5113495bSYour Name 			head_nbuf = NULL;
1143*5113495bSYour Name 			goto process_next_msdu;
1144*5113495bSYour Name 		}
1145*5113495bSYour Name 
1146*5113495bSYour Name 		if (is_pn_check_needed) {
1147*5113495bSYour Name 			if (msdu_list.msdu_info[i].msdu_flags &
1148*5113495bSYour Name 			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1149*5113495bSYour Name 				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1150*5113495bSYour Name 						mpdu_desc_info,
1151*5113495bSYour Name 						first_msdu_in_mpdu_processed);
1152*5113495bSYour Name 				first_msdu_in_mpdu_processed = true;
1153*5113495bSYour Name 			} else {
1154*5113495bSYour Name 				if (!first_msdu_in_mpdu_processed) {
1155*5113495bSYour Name 					/*
1156*5113495bSYour Name 					 * If no msdu in this mpdu was dropped
1157*5113495bSYour Name 					 * due to failed sanity checks, then
1158*5113495bSYour Name 					 * its not expected to hit this
1159*5113495bSYour Name 					 * condition. Hence we assert here.
1160*5113495bSYour Name 					 */
1161*5113495bSYour Name 					if (!msdu_dropped)
1162*5113495bSYour Name 						qdf_assert_always(0);
1163*5113495bSYour Name 
1164*5113495bSYour Name 					/*
1165*5113495bSYour Name 					 * We do not have valid mpdu_desc_info
1166*5113495bSYour Name 					 * to process this nbuf, hence drop it.
1167*5113495bSYour Name 					 * TODO - Increment stats
1168*5113495bSYour Name 					 */
1169*5113495bSYour Name 					goto process_next_msdu;
1170*5113495bSYour Name 				}
1171*5113495bSYour Name 				/*
1172*5113495bSYour Name 				 * DO NOTHING -
1173*5113495bSYour Name 				 * Continue using the same mpdu_desc_info
1174*5113495bSYour Name 				 * details populated from the first msdu in
1175*5113495bSYour Name 				 * the mpdu.
1176*5113495bSYour Name 				 */
1177*5113495bSYour Name 			}
1178*5113495bSYour Name 
1179*5113495bSYour Name 			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1180*5113495bSYour Name 			if (QDF_IS_STATUS_ERROR(status)) {
1181*5113495bSYour Name 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1182*5113495bSYour Name 					     1);
1183*5113495bSYour Name 				goto process_next_msdu;
1184*5113495bSYour Name 			}
1185*5113495bSYour Name 
1186*5113495bSYour Name 			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1187*5113495bSYour Name 					mpdu_desc_info->peer_meta_data);
1188*5113495bSYour Name 
1189*5113495bSYour Name 			if (mpdu_desc_info->bar_frame)
1190*5113495bSYour Name 				_dp_rx_bar_frame_handle(soc, nbuf,
1191*5113495bSYour Name 							mpdu_desc_info, tid,
1192*5113495bSYour Name 							HAL_REO_ERROR_DETECTED,
1193*5113495bSYour Name 							err_code);
1194*5113495bSYour Name 		}
1195*5113495bSYour Name 
1196*5113495bSYour Name 		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1197*5113495bSYour Name 		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1198*5113495bSYour Name 
1199*5113495bSYour Name 		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1200*5113495bSYour Name 			/*
1201*5113495bSYour Name 			 * For SG case, only the length of last skb is valid
1202*5113495bSYour Name 			 * as HW only populate the msdu_len for last msdu
1203*5113495bSYour Name 			 * in rx link descriptor, use the length from
1204*5113495bSYour Name 			 * last skb to overwrite the head skb for further
1205*5113495bSYour Name 			 * SG processing.
1206*5113495bSYour Name 			 */
1207*5113495bSYour Name 			QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) =
1208*5113495bSYour Name 					QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf);
1209*5113495bSYour Name 			nbuf = dp_rx_sg_create(soc, head_nbuf);
1210*5113495bSYour Name 			qdf_nbuf_set_is_frag(nbuf, 1);
1211*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1212*5113495bSYour Name 		}
1213*5113495bSYour Name 		head_nbuf = NULL;
1214*5113495bSYour Name 
1215*5113495bSYour Name 		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1216*5113495bSYour Name 				soc, peer_id,
1217*5113495bSYour Name 				&txrx_ref_handle,
1218*5113495bSYour Name 				DP_MOD_ID_RX_ERR);
1219*5113495bSYour Name 		if (!txrx_peer)
1220*5113495bSYour Name 			dp_info_rl("txrx_peer is null peer_id %u",
1221*5113495bSYour Name 				   peer_id);
1222*5113495bSYour Name 
1223*5113495bSYour Name 		dp_rx_nbuf_set_link_id_from_tlv(soc, qdf_nbuf_data(nbuf), nbuf);
1224*5113495bSYour Name 
1225*5113495bSYour Name 		if (pdev && pdev->link_peer_stats &&
1226*5113495bSYour Name 		    txrx_peer && txrx_peer->is_mld_peer) {
1227*5113495bSYour Name 			link_id = dp_rx_get_stats_arr_idx_from_link_id(
1228*5113495bSYour Name 								nbuf,
1229*5113495bSYour Name 								txrx_peer);
1230*5113495bSYour Name 		}
1231*5113495bSYour Name 
1232*5113495bSYour Name 		if (txrx_peer)
1233*5113495bSYour Name 			dp_rx_set_nbuf_band(nbuf, txrx_peer, link_id);
1234*5113495bSYour Name 
1235*5113495bSYour Name 		switch (err_code) {
1236*5113495bSYour Name 		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1237*5113495bSYour Name 		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1238*5113495bSYour Name 		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1239*5113495bSYour Name 			/*
1240*5113495bSYour Name 			 * only first msdu, mpdu start description tlv valid?
1241*5113495bSYour Name 			 * and use it for following msdu.
1242*5113495bSYour Name 			 */
1243*5113495bSYour Name 			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1244*5113495bSYour Name 							   rx_tlv_hdr_last))
1245*5113495bSYour Name 				tid = hal_rx_mpdu_start_tid_get(
1246*5113495bSYour Name 							soc->hal_soc,
1247*5113495bSYour Name 							rx_tlv_hdr_first);
1248*5113495bSYour Name 
1249*5113495bSYour Name 			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1250*5113495bSYour Name 					  peer_id, tid);
1251*5113495bSYour Name 			break;
1252*5113495bSYour Name 		case HAL_REO_ERR_REGULAR_FRAME_OOR:
1253*5113495bSYour Name 		case HAL_REO_ERR_BAR_FRAME_OOR:
1254*5113495bSYour Name 			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1255*5113495bSYour Name 			break;
1256*5113495bSYour Name 		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1257*5113495bSYour Name 			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
1258*5113495bSYour Name 							       rx_tlv_hdr_last,
1259*5113495bSYour Name 							       rx_desc_pool_id,
1260*5113495bSYour Name 							       txrx_peer,
1261*5113495bSYour Name 							       TRUE,
1262*5113495bSYour Name 							       link_id);
1263*5113495bSYour Name 			break;
1264*5113495bSYour Name 		default:
1265*5113495bSYour Name 			dp_err_rl("Non-support error code %d", err_code);
1266*5113495bSYour Name 			dp_rx_nbuf_free(nbuf);
1267*5113495bSYour Name 		}
1268*5113495bSYour Name 
1269*5113495bSYour Name 		if (txrx_peer)
1270*5113495bSYour Name 			dp_txrx_peer_unref_delete(txrx_ref_handle,
1271*5113495bSYour Name 						  DP_MOD_ID_RX_ERR);
1272*5113495bSYour Name process_next_msdu:
1273*5113495bSYour Name 		nbuf = head_nbuf;
1274*5113495bSYour Name 		while (nbuf) {
1275*5113495bSYour Name 			next_nbuf = qdf_nbuf_next(nbuf);
1276*5113495bSYour Name 			dp_rx_nbuf_free(nbuf);
1277*5113495bSYour Name 			nbuf = next_nbuf;
1278*5113495bSYour Name 		}
1279*5113495bSYour Name 		msdu_processed++;
1280*5113495bSYour Name 		head_nbuf = NULL;
1281*5113495bSYour Name 		tail_nbuf = NULL;
1282*5113495bSYour Name 	}
1283*5113495bSYour Name 
1284*5113495bSYour Name 	/*
1285*5113495bSYour Name 	 * If the msdu's are spread across multiple link-descriptors,
1286*5113495bSYour Name 	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1287*5113495bSYour Name 	 * spread across multiple buffers). Hence, it is
1288*5113495bSYour Name 	 * necessary to check the next link_descriptor and release
1289*5113495bSYour Name 	 * all the msdu's that are part of it.
1290*5113495bSYour Name 	 */
1291*5113495bSYour Name 	hal_rx_get_next_msdu_link_desc_buf_addr_info(
1292*5113495bSYour Name 			link_desc_va,
1293*5113495bSYour Name 			&next_link_desc_addr_info);
1294*5113495bSYour Name 
1295*5113495bSYour Name 	if (hal_rx_is_buf_addr_info_valid(
1296*5113495bSYour Name 				&next_link_desc_addr_info)) {
1297*5113495bSYour Name 		/* Clear the next link desc info for the current link_desc */
1298*5113495bSYour Name 		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1299*5113495bSYour Name 		dp_rx_link_desc_return_by_addr(
1300*5113495bSYour Name 				soc,
1301*5113495bSYour Name 				buf_addr_info,
1302*5113495bSYour Name 				HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1303*5113495bSYour Name 
1304*5113495bSYour Name 		hal_rx_buffer_addr_info_get_paddr(
1305*5113495bSYour Name 				&next_link_desc_addr_info,
1306*5113495bSYour Name 				&buf_info);
1307*5113495bSYour Name 		/* buffer_addr_info is the first element of ring_desc */
1308*5113495bSYour Name 		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1309*5113495bSYour Name 					  (uint32_t *)&next_link_desc_addr_info,
1310*5113495bSYour Name 					  &buf_info);
1311*5113495bSYour Name 		link_desc_va =
1312*5113495bSYour Name 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1313*5113495bSYour Name 		cur_link_desc_addr_info = next_link_desc_addr_info;
1314*5113495bSYour Name 		buf_addr_info = &cur_link_desc_addr_info;
1315*5113495bSYour Name 
1316*5113495bSYour Name 		goto more_msdu_link_desc;
1317*5113495bSYour Name 	}
1318*5113495bSYour Name 
1319*5113495bSYour Name 	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1320*5113495bSYour Name 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1321*5113495bSYour Name 	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1322*5113495bSYour Name 		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1323*5113495bSYour Name 
1324*5113495bSYour Name 	return rx_bufs_used;
1325*5113495bSYour Name }
1326*5113495bSYour Name 
1327*5113495bSYour Name #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1328*5113495bSYour Name 
/**
 * dp_rx_process_rxdma_err() - Handle a buffer for which RXDMA reported an
 *			       error; deliver it to the stack, the mesh path,
 *			       or drop it.
 * @soc: core DP soc context
 * @nbuf: buffer holding the errored MSDU (rx TLVs still at the head)
 * @rx_tlv_hdr: start of the rx TLV header for this MSDU
 * @txrx_peer: txrx peer handle; may be NULL (invalid-peer path)
 * @err_code: RXDMA error code reported for this buffer
 * @mac_id: mac id on which the frame arrived
 * @link_id: link id on which the packet is received
 *
 * Ownership of @nbuf is consumed on every path: it is freed, handed to the
 * invalid-peer wrapper, or delivered to the OS interface.
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {

		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		/* debug builds stop here; release builds fall through */
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	/* Total length = payload + L3 pad + the TLV header still at front */
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		return;
	}
	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	/* Single unfragmented MSDU: mark it as both start and end */
	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!txrx_peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper; it takes over nbuf */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
				 vdev);
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		/* Ethertype field sits right after DA + SA */
		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			/* VLAN-tagged STP goes through the mesh/unencrypted
			 * filter; any other VLAN frame is delivered as-is.
			 */
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
							htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted
	 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
	 * key install is already done
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	/* Non-mesh vdev must not accept unencrypted frames on this path */
	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			dp_rx_err_info("%pK: mesh pkt filtered", soc);
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			dp_rx_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
	}
process_rx:
	/* Per-peer mcast/bcast/ucast accounting before delivery */
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
		(vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0 ;
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
					      qdf_nbuf_len(nbuf), link_id);
		if (is_broadcast) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
						      qdf_nbuf_len(nbuf),
						      link_id);
		}
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
					      qdf_nbuf_len(nbuf),
					      link_id);
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
		/* Mark as exception frame so the stack knows its origin */
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
	}

	return;
}
1487*5113495bSYour Name 
/**
 * dp_rx_process_mic_error() - Handle a buffer for which HW flagged a
 *			       TKIP MIC failure and notify the control path.
 * @soc: core DP soc context
 * @nbuf: buffer holding the errored MPDU (rx TLVs at the head)
 * @rx_tlv_hdr: start of the rx TLV header of @nbuf
 * @txrx_peer: txrx peer handle for the transmitter; may be NULL
 *
 * Raw-decap non-first fragments are handed to the defrag path instead
 * (which consumes the buffer). Otherwise the DA/TA addresses are read
 * from the MPDU, the failure is reported through the rx_mic_error
 * callback, and the buffer is freed.
 *
 * Return: None
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	/*
	 * Only the first MSDU of the MPDU carries the TLV fields read below.
	 * NOTE(review): this early return does not free @nbuf - presumably
	 * the caller retains ownership on this path; confirm against callers.
	 */
	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!txrx_peer) {
		dp_info_rl("txrx_peer not found");
		goto fail;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							 qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			/* Defrag path takes ownership of nbuf */
			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	/* Guard against an ol_ops table that was never registered */
	if (tops && tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	dp_rx_nbuf_free(nbuf);
}
1570*5113495bSYour Name 
1571*5113495bSYour Name #ifdef WLAN_SUPPORT_RX_FLOW_TAG
dp_rx_peek_trapped_packet(struct dp_soc * soc,struct dp_vdev * vdev)1572*5113495bSYour Name static void dp_rx_peek_trapped_packet(struct dp_soc *soc,
1573*5113495bSYour Name 				      struct dp_vdev *vdev)
1574*5113495bSYour Name {
1575*5113495bSYour Name 	if (soc->cdp_soc.ol_ops->send_wakeup_trigger)
1576*5113495bSYour Name 		soc->cdp_soc.ol_ops->send_wakeup_trigger(soc->ctrl_psoc,
1577*5113495bSYour Name 				vdev->vdev_id);
1578*5113495bSYour Name }
1579*5113495bSYour Name #else
/* WLAN_SUPPORT_RX_FLOW_TAG disabled: trapped-packet wakeup is a no-op. */
static void dp_rx_peek_trapped_packet(struct dp_soc *soc,
				      struct dp_vdev *vdev)
{
}
1585*5113495bSYour Name #endif
1586*5113495bSYour Name 
1587*5113495bSYour Name #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1588*5113495bSYour Name 	defined(WLAN_MCAST_MLO)
/**
 * dp_rx_igmp_handler() - Offer a frame to the arch-specific MLO multicast
 *			  handler, if one is registered.
 * @soc: core DP soc context
 * @vdev: vdev on which the frame was received
 * @peer: txrx peer handle
 * @nbuf: received buffer
 * @link_id: link id on which the packet was received
 *
 * Return: true if the handler consumed @nbuf, false otherwise
 */
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	bool consumed = false;

	if (soc->arch_ops.dp_rx_mcast_handler)
		consumed = soc->arch_ops.dp_rx_mcast_handler(soc, vdev,
							     peer, nbuf,
							     link_id);

	return consumed;
}
1602*5113495bSYour Name #else
/* MLO multicast handling compiled out: never consume the frame here. */
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	return false;
}
1611*5113495bSYour Name #endif
1612*5113495bSYour Name 
/**
 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack
 *                            Free any other packet which comes in
 *                            this path.
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @err_src: rxdma/reo
 * @link_id: link id on which the packet is received
 *
 * This function indicates EAPOL frame received in wbm error ring to stack.
 * Any other frame should be dropped. @nbuf is consumed on every path:
 * delivered to the stack, consumed by the IGMP/MLO handler, or freed.
 *
 * Return: None
 */
static void
dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
		    enum hal_rx_wbm_error_source err_src,
		    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct hal_rx_msdu_metadata msdu_metadata;
	bool is_eapol;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	/* Propagate chain/DA/SA attributes from the rx TLVs into the nbuf */
	qdf_nbuf_set_rx_chfrag_start(
				nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf, clamped to the rx buffer size */
		qdf_nbuf_set_pktlen(nbuf, qdf_min(pkt_len, (uint32_t)buf_size));
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		/* debug builds stop here; release builds fall through */
		qdf_assert(0);
	}

	if (!txrx_peer)
		goto drop_nbuf;

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
				   soc->rx_pkt_tlv_size));

	/* Standby-mode trapped packet: poke the control plane awake */
	if (hal_rx_msdu_cce_metadata_get(soc->hal_soc, rx_tlv_hdr) ==
			CDP_STANDBY_METADATA)
		dp_rx_peek_trapped_packet(soc, vdev);

	QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id;
	/* MLO multicast handler may consume the frame entirely */
	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
		return;

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	/*
	 * Indicate EAPOL frame to stack only when vap mac address
	 * matches the destination address.
	 */
	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (dp_rx_err_match_dhost(eh, vdev)) {
			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata.
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);
			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
						  qdf_nbuf_len(nbuf),
						  vdev->pdev->enhanced_stats_en);
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
						      rx.rx_success, 1,
						      qdf_nbuf_len(nbuf),
						      link_id);
			/* Mark as exception frame before handing to stack */
			qdf_nbuf_set_exc_frame(nbuf, 1);
			qdf_nbuf_set_next(nbuf, NULL);

			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
						    NULL, is_eapol);

			return;
		}
	}

drop_nbuf:

	/* Attribute the drop to the ring that routed the frame here */
	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_REO);
	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);

	dp_rx_nbuf_free(nbuf);
}
1762*5113495bSYour Name 
1763*5113495bSYour Name #ifndef QCA_HOST_MODE_WIFI_DISABLED
1764*5113495bSYour Name 
1765*5113495bSYour Name #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1766*5113495bSYour Name /**
1767*5113495bSYour Name  * dp_rx_link_cookie_check() - Validate link desc cookie
1768*5113495bSYour Name  * @ring_desc: ring descriptor
1769*5113495bSYour Name  *
1770*5113495bSYour Name  * Return: qdf status
1771*5113495bSYour Name  */
1772*5113495bSYour Name static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)1773*5113495bSYour Name dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1774*5113495bSYour Name {
1775*5113495bSYour Name 	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1776*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1777*5113495bSYour Name 
1778*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1779*5113495bSYour Name }
1780*5113495bSYour Name 
1781*5113495bSYour Name /**
1782*5113495bSYour Name  * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1783*5113495bSYour Name  * @ring_desc: ring descriptor
1784*5113495bSYour Name  *
1785*5113495bSYour Name  * Return: None
1786*5113495bSYour Name  */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
	/* Mark the link desc cookie consumed so a duplicate reap of this
	 * ring entry is detected by dp_rx_link_cookie_check().
	 */
	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
}
1792*5113495bSYour Name #else
/* DP_RX_DESC_COOKIE_INVALIDATE not enabled: every cookie is treated valid */
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}
1798*5113495bSYour Name 
/* No-op stub when DP_RX_DESC_COOKIE_INVALIDATE is not enabled */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
}
1803*5113495bSYour Name #endif
1804*5113495bSYour Name 
1805*5113495bSYour Name #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1806*5113495bSYour Name /**
1807*5113495bSYour Name  * dp_rx_err_ring_record_entry() - Record rx err ring history
1808*5113495bSYour Name  * @soc: Datapath soc structure
1809*5113495bSYour Name  * @paddr: paddr of the buffer in RX err ring
1810*5113495bSYour Name  * @sw_cookie: SW cookie of the buffer in RX err ring
1811*5113495bSYour Name  * @rbm: Return buffer manager of the buffer in RX err ring
1812*5113495bSYour Name  *
1813*5113495bSYour Name  * Return: None
1814*5113495bSYour Name  */
1815*5113495bSYour Name static inline void
dp_rx_err_ring_record_entry(struct dp_soc * soc,uint64_t paddr,uint32_t sw_cookie,uint8_t rbm)1816*5113495bSYour Name dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1817*5113495bSYour Name 			    uint32_t sw_cookie, uint8_t rbm)
1818*5113495bSYour Name {
1819*5113495bSYour Name 	struct dp_buf_info_record *record;
1820*5113495bSYour Name 	uint32_t idx;
1821*5113495bSYour Name 
1822*5113495bSYour Name 	if (qdf_unlikely(!soc->rx_err_ring_history))
1823*5113495bSYour Name 		return;
1824*5113495bSYour Name 
1825*5113495bSYour Name 	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1826*5113495bSYour Name 					DP_RX_ERR_HIST_MAX);
1827*5113495bSYour Name 
1828*5113495bSYour Name 	/* No NULL check needed for record since its an array */
1829*5113495bSYour Name 	record = &soc->rx_err_ring_history->entry[idx];
1830*5113495bSYour Name 
1831*5113495bSYour Name 	record->timestamp = qdf_get_log_timestamp();
1832*5113495bSYour Name 	record->hbi.paddr = paddr;
1833*5113495bSYour Name 	record->hbi.sw_cookie = sw_cookie;
1834*5113495bSYour Name 	record->hbi.rbm = rbm;
1835*5113495bSYour Name }
1836*5113495bSYour Name #else
/* No-op stub when WLAN_FEATURE_DP_RX_RING_HISTORY is not enabled */
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
}
1842*5113495bSYour Name #endif
1843*5113495bSYour Name 
1844*5113495bSYour Name #if defined(HANDLE_RX_REROUTE_ERR) || defined(REO_EXCEPTION_MSDU_WAR)
/**
 * dp_rx_err_handle_msdu_buf() - WAR to reap an MSDU buffer wrongly routed
 *				 to the REO error ring
 * @soc: DP SOC handle
 * @ring_desc: REO error ring descriptor carrying an MSDU buffer address
 *
 * Validates the SW cookie and buffer paddr, unmaps and frees the nbuf,
 * and returns the rx descriptor to its pdev free list.
 *
 * Return: lmac id (pool id) of the reaped buffer on success,
 *	   DP_INVALID_LMAC_ID on any sanity failure (asserts in debug).
 */
static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
				     hal_ring_desc_t ring_desc)
{
	int lmac_id = DP_INVALID_LMAC_ID;
	struct dp_rx_desc *rx_desc;
	struct hal_buf_info hbi;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);

	/* sanity */
	if (!rx_desc) {
		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
		goto assert_return;
	}

	if (!rx_desc->nbuf)
		goto assert_return;

	dp_rx_err_ring_record_entry(soc, hbi.paddr,
				    hbi.sw_cookie,
				    hal_rx_ret_buf_manager_get(soc->hal_soc,
							       ring_desc));
	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
		rx_desc->in_err_state = 1;
		goto assert_return;
	}

	/*
	 * Resolve the pdev before freeing the nbuf: the descriptor is
	 * returned to pdev's free list below, and dp_get_pdev_for_lmac_id()
	 * can return NULL (checked elsewhere in this file) - bail out
	 * before the buffer is unmapped so nothing is orphaned.
	 */
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
	if (qdf_unlikely(!pdev))
		goto assert_return;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	/* After this point the rx_desc and nbuf are valid */
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	qdf_assert_always(!rx_desc->unmapped);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);

	lmac_id = rx_desc->pool_id;
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
	return lmac_id;

assert_return:
	qdf_assert(0);
	return lmac_id;
}
1898*5113495bSYour Name #endif
1899*5113495bSYour Name 
1900*5113495bSYour Name #ifdef HANDLE_RX_REROUTE_ERR
/*
 * Account a wrongly routed MSDU buffer, trigger self-recovery when either
 * the absolute error count or the per-window error rate breaches its
 * threshold, then reap the buffer via dp_rx_err_handle_msdu_buf().
 */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	uint64_t now;

	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);

	/* Absolute-count based recovery */
	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
		       soc->rx_route_err_start_pkt_ts);
		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
	}

	now = qdf_get_log_timestamp_usecs();
	if (!soc->rx_route_err_start_pkt_ts)
		soc->rx_route_err_start_pkt_ts = now;

	/* Rate based recovery: count errors inside a rolling time window */
	if ((now - soc->rx_route_err_start_pkt_ts) <=
						DP_RX_ERR_ROUTE_TIMEOUT_US) {
		soc->rx_route_err_in_window++;
	} else {
		soc->rx_route_err_start_pkt_ts = now;

		if (soc->rx_route_err_in_window >
		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
			qdf_trigger_self_recovery(NULL,
						  QDF_RX_REG_PKT_ROUTE_ERR);
			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
			       soc->rx_route_err_start_pkt_ts);
		} else {
			soc->rx_route_err_in_window = 1;
		}
	}

	return dp_rx_err_handle_msdu_buf(soc, ring_desc);
}
1944*5113495bSYour Name #else /* HANDLE_RX_REROUTE_ERR */
1945*5113495bSYour Name #ifdef REO_EXCEPTION_MSDU_WAR
/* REO_EXCEPTION_MSDU_WAR: reap and recycle the wrongly routed MSDU buffer */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	return dp_rx_err_handle_msdu_buf(soc, ring_desc);
}
1950*5113495bSYour Name #else	/* REO_EXCEPTION_MSDU_WAR */
/*
 * Neither reroute-error WAR is compiled in: an MSDU buffer in the REO
 * error ring is unexpected, so assert and report no buffer reaped.
 */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	qdf_assert_always(0);

	return DP_INVALID_LMAC_ID;
}
1957*5113495bSYour Name #endif /* REO_EXCEPTION_MSDU_WAR */
1958*5113495bSYour Name #endif /* HANDLE_RX_REROUTE_ERR */
1959*5113495bSYour Name 
1960*5113495bSYour Name #ifdef WLAN_MLO_MULTI_CHIP
1961*5113495bSYour Name /**
1962*5113495bSYour Name  * dp_idle_link_bm_id_check() - war for HW issue
1963*5113495bSYour Name  *
1964*5113495bSYour Name  * @soc: DP SOC handle
1965*5113495bSYour Name  * @rbm: idle link RBM value
1966*5113495bSYour Name  * @ring_desc: reo error link descriptor
1967*5113495bSYour Name  *
1968*5113495bSYour Name  * This is a war for HW issue where link descriptor
1969*5113495bSYour Name  * of partner soc received due to packets wrongly
1970*5113495bSYour Name  * interpreted as fragments
1971*5113495bSYour Name  *
1972*5113495bSYour Name  * Return: true in case link desc is consumed
1973*5113495bSYour Name  *	   false in other cases
1974*5113495bSYour Name  */
dp_idle_link_bm_id_check(struct dp_soc * soc,uint8_t rbm,void * ring_desc)1975*5113495bSYour Name static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1976*5113495bSYour Name 				     void *ring_desc)
1977*5113495bSYour Name {
1978*5113495bSYour Name 	struct dp_soc *replenish_soc = NULL;
1979*5113495bSYour Name 
1980*5113495bSYour Name 	/* return ok incase of link desc of same soc */
1981*5113495bSYour Name 	if (rbm == soc->idle_link_bm_id)
1982*5113495bSYour Name 		return false;
1983*5113495bSYour Name 
1984*5113495bSYour Name 	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
1985*5113495bSYour Name 		replenish_soc =
1986*5113495bSYour Name 			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
1987*5113495bSYour Name 
1988*5113495bSYour Name 	qdf_assert_always(replenish_soc);
1989*5113495bSYour Name 
1990*5113495bSYour Name 	/*
1991*5113495bSYour Name 	 * For WIN usecase we should only get fragment packets in
1992*5113495bSYour Name 	 * this ring as for MLO case fragmentation is not supported
1993*5113495bSYour Name 	 * we should not see links from other soc.
1994*5113495bSYour Name 	 *
1995*5113495bSYour Name 	 * Drop all packets from partner soc and replenish the descriptors
1996*5113495bSYour Name 	 */
1997*5113495bSYour Name 	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
1998*5113495bSYour Name 				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
1999*5113495bSYour Name 
2000*5113495bSYour Name 	return true;
2001*5113495bSYour Name }
2002*5113495bSYour Name #else
/* Single-chip build: a foreign idle-link RBM cannot occur, never consume */
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	return false;
}
2008*5113495bSYour Name #endif
2009*5113495bSYour Name 
2010*5113495bSYour Name static inline void
dp_rx_err_dup_frame(struct dp_soc * soc,struct hal_rx_mpdu_desc_info * mpdu_desc_info)2011*5113495bSYour Name dp_rx_err_dup_frame(struct dp_soc *soc,
2012*5113495bSYour Name 		    struct hal_rx_mpdu_desc_info *mpdu_desc_info)
2013*5113495bSYour Name {
2014*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
2015*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
2016*5113495bSYour Name 	uint16_t peer_id;
2017*5113495bSYour Name 
2018*5113495bSYour Name 	peer_id =
2019*5113495bSYour Name 		dp_rx_peer_metadata_peer_id_get(soc,
2020*5113495bSYour Name 						mpdu_desc_info->peer_meta_data);
2021*5113495bSYour Name 	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2022*5113495bSYour Name 						   &txrx_ref_handle,
2023*5113495bSYour Name 						   DP_MOD_ID_RX_ERR);
2024*5113495bSYour Name 	if (txrx_peer) {
2025*5113495bSYour Name 		DP_STATS_INC(txrx_peer->vdev, rx.duplicate_count, 1);
2026*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2027*5113495bSYour Name 	}
2028*5113495bSYour Name }
2029*5113495bSYour Name 
/*
 * dp_rx_err_process() - REO exception ring handler.
 *
 * Reaps up to @quota entries from the REO error ring, dispatches each
 * entry by buffer type and REO error code (PN failure, 2k-jump/OOR,
 * duplicates, fragments, BAR frames, rerouted MSDU buffers), then
 * replenishes all reaped RX buffers back to the per-MAC RXDMA refill
 * rings. Returns the number of RX buffers consumed.
 */
uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t err_status;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;
	QDF_STATUS status;
	bool ret;
	uint32_t error_code = 0;
	bool sw_pn_check_needed;
	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
	int i, rx_bufs_reaped_total;
	uint16_t peer_id;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	uint32_t num_pending, num_entries;
	bool near_full;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);
	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);

	/* Re-entry point when entries remain after the quota was consumed */
more_data:
	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
			      hal_ring_hdl);
		goto done;
	}

	/* NOTE: quota is decremented inside the loop condition itself */
	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_peek(hal_soc,
						  hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);

		if (err_status == HAL_REO_ERROR_DETECTED)
			error_code = hal_rx_get_reo_error_code(hal_soc,
							       ring_desc);

		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
								  err_status,
								  error_code);
		if (!sw_pn_check_needed) {
			/*
			 * MPDU desc info will be present in the REO desc
			 * only in the below scenarios
			 * 1) pn_in_dest_disabled:  always
			 * 2) pn_in_dest enabled: All cases except 2k-jup
			 *			and OOR errors
			 */
			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
						  &mpdu_desc_info);
		}

		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
			goto next_entry;

		/*
		 * For REO error ring, only MSDU LINK DESC is expected.
		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
		 */
		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
			int lmac_id;

			lmac_id = dp_rx_err_exception(soc, ring_desc);
			if (lmac_id >= 0)
				rx_bufs_reaped[lmac_id] += 1;
			goto next_entry;
		}

		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
					  &hbi);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
					soc->link_desc_id_start);

		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			goto next_entry;
		}

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		if (!num_msdus ||
		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
					  num_msdus, msdu_list.sw_cookie[0]);
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
			goto next_entry;
		}

		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		// TODO - BE- Check if the RBM is to be checked for all chips
		if (qdf_unlikely((msdu_list.rbm[0] !=
					dp_rx_get_rx_bm_id(soc)) &&
				 (msdu_list.rbm[0] !=
				  soc->idle_link_bm_id) &&
				 (msdu_list.rbm[0] !=
					dp_rx_get_defrag_bm_id(soc)))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				dp_rx_err_err("%pK: Invalid RBM %d",
					      soc, msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* SW PN check path skips BAR/fragment handling below */
		if (sw_pn_check_needed) {
			goto process_reo_error_code;
		}

		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
					       &mpdu_desc_info, err_status,
					       error_code);

			rx_bufs_reaped[mac_id] += 1;
			goto next_entry;
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * this is a unlikely scenario where the host is reaping
			 * a descriptor which it already reaped just a while ago
			 * but is yet to replenish it back to HW.
			 * In this case host will dump the last 128 descriptors
			 * including the software descriptor rx_desc and assert.
			 */

			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);

			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info.peer_meta_data);
			txrx_peer =
				dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							       &txrx_ref_handle,
							       DP_MOD_ID_RX_ERR);
			if (txrx_peer) {
				DP_STATS_INC(txrx_peer->vdev,
					     rx.fragment_count, 1);
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			}
			goto next_entry;
		}

process_reo_error_code:
		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);

		dp_info_rl("Got pkt with REO ERROR: %d", error_code);

		switch (error_code) {
		case HAL_REO_ERR_PN_CHECK_FAILED:
		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					error_code);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_NON_BA_DUPLICATE:
			dp_rx_err_dup_frame(soc, &mpdu_desc_info);
			fallthrough;
		case HAL_REO_ERR_QUEUE_DESC_INVALID:
		case HAL_REO_ERR_AMPDU_IN_NON_BA:
		case HAL_REO_ERR_BA_DUPLICATE:
		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			count = dp_rx_msdus_drop(soc, ring_desc,
						 &mpdu_desc_info,
						 &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			break;
		default:
			/* Assert if unexpected error type */
			qdf_assert_always(0);
		}
		/* Done with this entry: mark cookie consumed, advance HP */
next_entry:
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped_total = 0;
		for (i = 0; i < MAX_PDEV_CNT; i++)
			rx_bufs_reaped_total += rx_bufs_reaped[i];

		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
						  max_reap_limit))
			break;
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	/* Replenish buffers reaped from each MAC's pool back to RXDMA */
	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail,
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
		rx_bufs_reaped[mac_id] = 0;
	}

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);

			if (num_pending) {
				DP_STATS_INC(soc, rx.err.hp_oos2, 1);

				if (!hif_exec_should_yield(soc->hif_handle,
							   int_ctx->dp_intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.err.near_full, 1);
					goto more_data;
				}
			}
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
2396*5113495bSYour Name 
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Build-time policy knob: when DROP_RXDMA_DECRYPT_ERR is configured the
 * caller must drop rxdma decrypt-error frames instead of processing them.
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
#ifdef DROP_RXDMA_DECRYPT_ERR
	return false;
#else
	return true;
#endif
}
2413*5113495bSYour Name 
dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc * soc)2414*5113495bSYour Name void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2415*5113495bSYour Name {
2416*5113495bSYour Name 	if (soc->wbm_sg_last_msdu_war) {
2417*5113495bSYour Name 		uint32_t len;
2418*5113495bSYour Name 		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2419*5113495bSYour Name 
2420*5113495bSYour Name 		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2421*5113495bSYour Name 						     qdf_nbuf_data(temp));
2422*5113495bSYour Name 		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2423*5113495bSYour Name 		while (temp) {
2424*5113495bSYour Name 			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2425*5113495bSYour Name 			temp = temp->next;
2426*5113495bSYour Name 		}
2427*5113495bSYour Name 	}
2428*5113495bSYour Name }
2429*5113495bSYour Name 
2430*5113495bSYour Name #ifdef RX_DESC_DEBUG_CHECK
dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc * soc,hal_ring_handle_t hal_ring_hdl,hal_ring_desc_t ring_desc,struct dp_rx_desc * rx_desc)2431*5113495bSYour Name QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2432*5113495bSYour Name 					    hal_ring_handle_t hal_ring_hdl,
2433*5113495bSYour Name 					    hal_ring_desc_t ring_desc,
2434*5113495bSYour Name 					    struct dp_rx_desc *rx_desc)
2435*5113495bSYour Name {
2436*5113495bSYour Name 	struct hal_buf_info hbi;
2437*5113495bSYour Name 
2438*5113495bSYour Name 	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2439*5113495bSYour Name 	/* Sanity check for possible buffer paddr corruption */
2440*5113495bSYour Name 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2441*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
2442*5113495bSYour Name 
2443*5113495bSYour Name 	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2444*5113495bSYour Name 
2445*5113495bSYour Name 	return QDF_STATUS_E_FAILURE;
2446*5113495bSYour Name }
2447*5113495bSYour Name 
2448*5113495bSYour Name #else
dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc * soc,hal_ring_handle_t hal_ring_hdl,hal_ring_desc_t ring_desc,struct dp_rx_desc * rx_desc)2449*5113495bSYour Name QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2450*5113495bSYour Name 					    hal_ring_handle_t hal_ring_hdl,
2451*5113495bSYour Name 					    hal_ring_desc_t ring_desc,
2452*5113495bSYour Name 					    struct dp_rx_desc *rx_desc)
2453*5113495bSYour Name {
2454*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2455*5113495bSYour Name }
2456*5113495bSYour Name #endif
2457*5113495bSYour Name bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info * info)2458*5113495bSYour Name dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2459*5113495bSYour Name {
2460*5113495bSYour Name 	/*
2461*5113495bSYour Name 	 * Currently Null Queue and Unencrypted error handlers has support for
2462*5113495bSYour Name 	 * SG. Other error handler do not deal with SG buffer.
2463*5113495bSYour Name 	 */
2464*5113495bSYour Name 	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2465*5113495bSYour Name 	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2466*5113495bSYour Name 	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2467*5113495bSYour Name 	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2468*5113495bSYour Name 		return true;
2469*5113495bSYour Name 
2470*5113495bSYour Name 	return false;
2471*5113495bSYour Name }
2472*5113495bSYour Name 
2473*5113495bSYour Name #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
dp_rx_err_tlv_invalidate(struct dp_soc * soc,qdf_nbuf_t nbuf)2474*5113495bSYour Name void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2475*5113495bSYour Name 			      qdf_nbuf_t nbuf)
2476*5113495bSYour Name {
2477*5113495bSYour Name 	/*
2478*5113495bSYour Name 	 * In case of fast recycle TX driver can avoid invalidate
2479*5113495bSYour Name 	 * of buffer in case of SFE forward. We need to invalidate
2480*5113495bSYour Name 	 * the TLV headers after writing to this location
2481*5113495bSYour Name 	 */
2482*5113495bSYour Name 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2483*5113495bSYour Name 				      (void *)(nbuf->data +
2484*5113495bSYour Name 					       soc->rx_pkt_tlv_size +
2485*5113495bSYour Name 					       L3_HEADER_PAD));
2486*5113495bSYour Name }
2487*5113495bSYour Name #else
dp_rx_err_tlv_invalidate(struct dp_soc * soc,qdf_nbuf_t nbuf)2488*5113495bSYour Name void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2489*5113495bSYour Name 			      qdf_nbuf_t nbuf)
2490*5113495bSYour Name {
2491*5113495bSYour Name }
2492*5113495bSYour Name #endif
2493*5113495bSYour Name 
2494*5113495bSYour Name #ifndef CONFIG_NBUF_AP_PLATFORM
2495*5113495bSYour Name static inline uint16_t
dp_rx_get_peer_id(struct dp_soc * soc,uint8_t * rx_tlv_hdr,qdf_nbuf_t nbuf)2496*5113495bSYour Name dp_rx_get_peer_id(struct dp_soc *soc,
2497*5113495bSYour Name 		  uint8_t *rx_tlv_hdr,
2498*5113495bSYour Name 		  qdf_nbuf_t nbuf)
2499*5113495bSYour Name {
2500*5113495bSYour Name 	uint32_t peer_mdata = 0;
2501*5113495bSYour Name 
2502*5113495bSYour Name 	peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2503*5113495bSYour Name 						   rx_tlv_hdr);
2504*5113495bSYour Name 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2505*5113495bSYour Name }
2506*5113495bSYour Name 
2507*5113495bSYour Name static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,union hal_wbm_err_info_u * wbm_err)2508*5113495bSYour Name dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2509*5113495bSYour Name 				 qdf_nbuf_t nbuf,
2510*5113495bSYour Name 				 uint8_t *rx_tlv_hdr,
2511*5113495bSYour Name 				 union hal_wbm_err_info_u *wbm_err)
2512*5113495bSYour Name {
2513*5113495bSYour Name 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2514*5113495bSYour Name 				      (uint8_t *)&wbm_err->info,
2515*5113495bSYour Name 				      sizeof(union hal_wbm_err_info_u));
2516*5113495bSYour Name }
2517*5113495bSYour Name 
2518*5113495bSYour Name void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc * soc,qdf_nbuf_t nbuf,union hal_wbm_err_info_u wbm_err)2519*5113495bSYour Name dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2520*5113495bSYour Name 			       qdf_nbuf_t nbuf,
2521*5113495bSYour Name 			       union hal_wbm_err_info_u wbm_err)
2522*5113495bSYour Name {
2523*5113495bSYour Name 	hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2524*5113495bSYour Name 				    qdf_nbuf_data(nbuf),
2525*5113495bSYour Name 				    (uint8_t *)&wbm_err.info,
2526*5113495bSYour Name 				    sizeof(union hal_wbm_err_info_u));
2527*5113495bSYour Name }
2528*5113495bSYour Name #else
2529*5113495bSYour Name static inline uint16_t
dp_rx_get_peer_id(struct dp_soc * soc,uint8_t * rx_tlv_hdr,qdf_nbuf_t nbuf)2530*5113495bSYour Name dp_rx_get_peer_id(struct dp_soc *soc,
2531*5113495bSYour Name 		  uint8_t *rx_tlv_hdr,
2532*5113495bSYour Name 		  qdf_nbuf_t nbuf)
2533*5113495bSYour Name {
2534*5113495bSYour Name 	uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
2535*5113495bSYour Name 
2536*5113495bSYour Name 	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2537*5113495bSYour Name }
2538*5113495bSYour Name 
2539*5113495bSYour Name static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc * soc,qdf_nbuf_t nbuf,uint8_t * rx_tlv_hdr,union hal_wbm_err_info_u * wbm_err)2540*5113495bSYour Name dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2541*5113495bSYour Name 				 qdf_nbuf_t nbuf,
2542*5113495bSYour Name 				 uint8_t *rx_tlv_hdr,
2543*5113495bSYour Name 				 union hal_wbm_err_info_u *wbm_err)
2544*5113495bSYour Name {
2545*5113495bSYour Name 	wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
2546*5113495bSYour Name }
2547*5113495bSYour Name 
2548*5113495bSYour Name void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc * soc,qdf_nbuf_t nbuf,union hal_wbm_err_info_u wbm_err)2549*5113495bSYour Name dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2550*5113495bSYour Name 			       qdf_nbuf_t nbuf,
2551*5113495bSYour Name 			       union hal_wbm_err_info_u wbm_err)
2552*5113495bSYour Name {
2553*5113495bSYour Name 	QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
2554*5113495bSYour Name }
2555*5113495bSYour Name #endif /* CONFIG_NBUF_AP_PLATFORM */
2556*5113495bSYour Name 
/**
 * dp_rx_wbm_err_process() - Process frames released on the WBM error ring
 * @int_ctx: DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: WBM error release ring handle
 * @quota: upper bound on the number of descriptors to reap
 *
 * Reaps error descriptors via the arch-specific reap handler, then walks
 * the resulting nbuf list and dispatches each frame to the matching REO or
 * RXDMA error handler (or frees it) based on the WBM error info stored in
 * the nbuf CB/TLV private area.
 *
 * Return: number of rx buffers reaped (no scale factor applied)
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_soc_handle_t hal_soc;
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *dp_pdev;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf, next;
	union hal_wbm_err_info_u wbm_err = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t link_id = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	/*
	 * Arch-specific reap: returns a linked list of error nbufs and
	 * updates rx_bufs_used with the number of buffers reaped.
	 */
	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
							  hal_ring_hdl,
							  quota,
							  &rx_bufs_used);
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;
		dp_txrx_ref_handle txrx_ref_handle = NULL;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf CB/TLV, so we can
		 * handle error cases appropriately
		 */
		dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
						 rx_tlv_hdr,
						 &wbm_err);

		peer_id = dp_rx_get_peer_id(soc,
					    rx_tlv_hdr,
					    nbuf);
		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_RX_ERR);

		/* Peer may already be gone; handlers below tolerate NULL */
		if (!txrx_peer)
			dp_info_rl("peer is null peer_id %u err_src %u, "
				   "REO: push_rsn %u err_code %u, "
				   "RXDMA: push_rsn %u err_code %u",
				   peer_id, wbm_err.info_bit.wbm_err_src,
				   wbm_err.info_bit.reo_psh_rsn,
				   wbm_err.info_bit.reo_err_code,
				   wbm_err.info_bit.rxdma_psh_rsn,
				   wbm_err.info_bit.rxdma_err_code);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		/*
		 * Form the SG for msdu continued buffers
		 * QCN9000 has this support
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			dp_rx_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			continue;
		}

		dp_rx_nbuf_set_link_id_from_tlv(soc, rx_tlv_hdr, nbuf);

		pool_id = wbm_err.info_bit.pool_id;
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		/*
		 * For MLD peers with per-link stats enabled, map the frame's
		 * link id to its stats array index; otherwise use index 0.
		 */
		if (dp_pdev && dp_pdev->link_peer_stats &&
		    txrx_peer && txrx_peer->is_mld_peer) {
			link_id = dp_rx_get_stats_arr_idx_from_link_id(
								nbuf,
								txrx_peer);
		} else {
			link_id = 0;
		}

		if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err.info_bit.reo_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err.info_bit.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err.info_bit.pool_id;
					soc->arch_ops.dp_rx_null_q_desc_handle(
								soc, nbuf,
								rx_tlv_hdr,
								pool_id,
								txrx_peer,
								FALSE,
								link_id);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.jump_2k_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;

					/* tid is valid only in the first MSDU's TLVs */
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_REGULAR_FRAME_OOR:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.oor_err,
									  1,
									  link_id);
					/* tid is valid only in the first MSDU's TLVs */
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
							hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_rx_oor_handle(soc, nbuf,
							 peer_id,
							 rx_tlv_hdr);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					/* BAR frame handling needs the dp_peer, not txrx_peer */
					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
					if (peer) {
						dp_rx_err_handle_bar(soc, peer,
								     nbuf);
						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
					}
					dp_rx_nbuf_free(nbuf);
					break;

				case HAL_REO_ERR_PN_CHECK_FAILED:
				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.pn_err,
									  1,
									  link_id);
					dp_rx_nbuf_free(nbuf);
					break;

				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err.info_bit.
						   reo_err_code);
					dp_rx_nbuf_free(nbuf);
				}
			} else if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO,
						    link_id);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid reo push reason %u",
						wbm_err.info_bit.reo_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				dp_assert_always_internal(0);
			}
		} else if (wbm_err.info_bit.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err.info_bit.rxdma_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err.info_bit.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					/* fallthrough */
				case HAL_RXDMA_ERR_WIFI_PARSE:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.rxdma_wifi_parse_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer,
								wbm_err.
								info_bit.
								rxdma_err_code,
								pool_id,
								link_id);
					break;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer);
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
					break;

				case HAL_RXDMA_ERR_DECRYPT:
					/* All the TKIP-MIC failures are treated as Decrypt Errors
					 * for QCN9224 Targets
					 */
					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);

					if (is_tkip_mic_err && txrx_peer) {
						dp_rx_process_mic_error(soc, nbuf,
									rx_tlv_hdr,
									txrx_peer);
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
						break;
					}

					/* Known peer: account the decrypt error and drop */
					if (txrx_peer) {
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.decrypt_err,
									  1,
									  link_id);
						dp_rx_nbuf_free(nbuf);
						break;
					}

					/* No peer: drop unless build allows handling */
					if (!dp_handle_rxdma_decrypt_err()) {
						dp_rx_nbuf_free(nbuf);
						break;
					}

					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_MULTICAST_ECHO:
					/* multicast echo frames are dropped, only counted */
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
									      rx.mec_drop, 1,
									      qdf_nbuf_len(nbuf),
									      link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_RXDMA_UNAUTHORIZED_WDS:
					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr,
								txrx_peer,
								err_code,
								pool_id,
								link_id);
					break;
				default:
					dp_rx_nbuf_free(nbuf);
					dp_err_rl("RXDMA error %d",
						  wbm_err.info_bit.rxdma_err_code);
				}
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_RXDMA,
						    link_id);
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
				/* flushed frames are counted and dropped */
				dp_rx_err_err("rxdma push reason %u",
						wbm_err.info_bit.rxdma_psh_rsn);
				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
				dp_rx_nbuf_free(nbuf);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid rxdma push reason %u",
						wbm_err.info_bit.rxdma_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				dp_assert_always_internal(0);
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (txrx_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);

		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
2908*5113495bSYour Name 
2909*5113495bSYour Name #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2910*5113495bSYour Name 
2911*5113495bSYour Name /**
2912*5113495bSYour Name  * dup_desc_dbg() - dump and assert if duplicate rx desc found
2913*5113495bSYour Name  *
2914*5113495bSYour Name  * @soc: core DP main context
2915*5113495bSYour Name  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2916*5113495bSYour Name  * @rx_desc: void pointer to rx descriptor
2917*5113495bSYour Name  *
2918*5113495bSYour Name  * Return: void
2919*5113495bSYour Name  */
dup_desc_dbg(struct dp_soc * soc,hal_rxdma_desc_t rxdma_dst_ring_desc,void * rx_desc)2920*5113495bSYour Name static void dup_desc_dbg(struct dp_soc *soc,
2921*5113495bSYour Name 			 hal_rxdma_desc_t rxdma_dst_ring_desc,
2922*5113495bSYour Name 			 void *rx_desc)
2923*5113495bSYour Name {
2924*5113495bSYour Name 	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2925*5113495bSYour Name 	dp_rx_dump_info_and_assert(
2926*5113495bSYour Name 			soc,
2927*5113495bSYour Name 			soc->rx_rel_ring.hal_srng,
2928*5113495bSYour Name 			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2929*5113495bSYour Name 			rx_desc);
2930*5113495bSYour Name }
2931*5113495bSYour Name 
2932*5113495bSYour Name /**
2933*5113495bSYour Name  * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
2934*5113495bSYour Name  *
2935*5113495bSYour Name  * @soc: core DP main context
2936*5113495bSYour Name  * @mac_id: mac id which is one of 3 mac_ids
2937*5113495bSYour Name  * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2938*5113495bSYour Name  * @head: head of descs list to be freed
2939*5113495bSYour Name  * @tail: tail of decs list to be freed
2940*5113495bSYour Name  *
2941*5113495bSYour Name  * Return: number of msdu in MPDU to be popped
2942*5113495bSYour Name  */
2943*5113495bSYour Name static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc * soc,uint32_t mac_id,hal_rxdma_desc_t rxdma_dst_ring_desc,union dp_rx_desc_list_elem_t ** head,union dp_rx_desc_list_elem_t ** tail)2944*5113495bSYour Name dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2945*5113495bSYour Name 	hal_rxdma_desc_t rxdma_dst_ring_desc,
2946*5113495bSYour Name 	union dp_rx_desc_list_elem_t **head,
2947*5113495bSYour Name 	union dp_rx_desc_list_elem_t **tail)
2948*5113495bSYour Name {
2949*5113495bSYour Name 	void *rx_msdu_link_desc;
2950*5113495bSYour Name 	qdf_nbuf_t msdu;
2951*5113495bSYour Name 	qdf_nbuf_t last;
2952*5113495bSYour Name 	struct hal_rx_msdu_list msdu_list;
2953*5113495bSYour Name 	uint16_t num_msdus;
2954*5113495bSYour Name 	struct hal_buf_info buf_info;
2955*5113495bSYour Name 	uint32_t rx_bufs_used = 0;
2956*5113495bSYour Name 	uint32_t msdu_cnt;
2957*5113495bSYour Name 	uint32_t i;
2958*5113495bSYour Name 	uint8_t push_reason;
2959*5113495bSYour Name 	uint8_t rxdma_error_code = 0;
2960*5113495bSYour Name 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2961*5113495bSYour Name 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2962*5113495bSYour Name 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2963*5113495bSYour Name 	hal_rxdma_desc_t ring_desc;
2964*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
2965*5113495bSYour Name 
2966*5113495bSYour Name 	if (!pdev) {
2967*5113495bSYour Name 		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
2968*5113495bSYour Name 				soc, mac_id);
2969*5113495bSYour Name 		return rx_bufs_used;
2970*5113495bSYour Name 	}
2971*5113495bSYour Name 
2972*5113495bSYour Name 	msdu = 0;
2973*5113495bSYour Name 
2974*5113495bSYour Name 	last = NULL;
2975*5113495bSYour Name 
2976*5113495bSYour Name 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2977*5113495bSYour Name 				     &buf_info, &msdu_cnt);
2978*5113495bSYour Name 
2979*5113495bSYour Name 	push_reason =
2980*5113495bSYour Name 		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2981*5113495bSYour Name 	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2982*5113495bSYour Name 		rxdma_error_code =
2983*5113495bSYour Name 			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2984*5113495bSYour Name 	}
2985*5113495bSYour Name 
2986*5113495bSYour Name 	do {
2987*5113495bSYour Name 		rx_msdu_link_desc =
2988*5113495bSYour Name 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2989*5113495bSYour Name 
2990*5113495bSYour Name 		qdf_assert_always(rx_msdu_link_desc);
2991*5113495bSYour Name 
2992*5113495bSYour Name 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2993*5113495bSYour Name 				     &msdu_list, &num_msdus);
2994*5113495bSYour Name 
2995*5113495bSYour Name 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2996*5113495bSYour Name 			/* if the msdus belongs to NSS offloaded radio &&
2997*5113495bSYour Name 			 * the rbm is not SW1_BM then return the msdu_link
2998*5113495bSYour Name 			 * descriptor without freeing the msdus (nbufs). let
2999*5113495bSYour Name 			 * these buffers be given to NSS completion ring for
3000*5113495bSYour Name 			 * NSS to free them.
3001*5113495bSYour Name 			 * else iterate through the msdu link desc list and
3002*5113495bSYour Name 			 * free each msdu in the list.
3003*5113495bSYour Name 			 */
3004*5113495bSYour Name 			if (msdu_list.rbm[0] !=
3005*5113495bSYour Name 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
3006*5113495bSYour Name 			    wlan_cfg_get_dp_pdev_nss_enabled(
3007*5113495bSYour Name 							pdev->wlan_cfg_ctx))
3008*5113495bSYour Name 				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
3009*5113495bSYour Name 			else {
3010*5113495bSYour Name 				for (i = 0; i < num_msdus; i++) {
3011*5113495bSYour Name 					struct dp_rx_desc *rx_desc =
3012*5113495bSYour Name 						soc->arch_ops.
3013*5113495bSYour Name 						dp_rx_desc_cookie_2_va(
3014*5113495bSYour Name 							soc,
3015*5113495bSYour Name 							msdu_list.sw_cookie[i]);
3016*5113495bSYour Name 					qdf_assert_always(rx_desc);
3017*5113495bSYour Name 					msdu = rx_desc->nbuf;
3018*5113495bSYour Name 					/*
3019*5113495bSYour Name 					 * this is a unlikely scenario
3020*5113495bSYour Name 					 * where the host is reaping
3021*5113495bSYour Name 					 * a descriptor which
3022*5113495bSYour Name 					 * it already reaped just a while ago
3023*5113495bSYour Name 					 * but is yet to replenish
3024*5113495bSYour Name 					 * it back to HW.
3025*5113495bSYour Name 					 * In this case host will dump
3026*5113495bSYour Name 					 * the last 128 descriptors
3027*5113495bSYour Name 					 * including the software descriptor
3028*5113495bSYour Name 					 * rx_desc and assert.
3029*5113495bSYour Name 					 */
3030*5113495bSYour Name 					ring_desc = rxdma_dst_ring_desc;
3031*5113495bSYour Name 					if (qdf_unlikely(!rx_desc->in_use)) {
3032*5113495bSYour Name 						dup_desc_dbg(soc,
3033*5113495bSYour Name 							     ring_desc,
3034*5113495bSYour Name 							     rx_desc);
3035*5113495bSYour Name 						continue;
3036*5113495bSYour Name 					}
3037*5113495bSYour Name 
3038*5113495bSYour Name 					if (rx_desc->unmapped == 0) {
3039*5113495bSYour Name 						rx_desc_pool =
3040*5113495bSYour Name 							&soc->rx_desc_buf[rx_desc->pool_id];
3041*5113495bSYour Name 						dp_ipa_rx_buf_smmu_mapping_lock(soc);
3042*5113495bSYour Name 						dp_rx_nbuf_unmap_pool(soc,
3043*5113495bSYour Name 								      rx_desc_pool,
3044*5113495bSYour Name 								      msdu);
3045*5113495bSYour Name 						rx_desc->unmapped = 1;
3046*5113495bSYour Name 						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3047*5113495bSYour Name 					}
3048*5113495bSYour Name 
3049*5113495bSYour Name 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
3050*5113495bSYour Name 							soc, msdu);
3051*5113495bSYour Name 
3052*5113495bSYour Name 					dp_rx_buffer_pool_nbuf_free(soc, msdu,
3053*5113495bSYour Name 							rx_desc->pool_id);
3054*5113495bSYour Name 					rx_bufs_used++;
3055*5113495bSYour Name 					dp_rx_add_to_free_desc_list(head,
3056*5113495bSYour Name 						tail, rx_desc);
3057*5113495bSYour Name 				}
3058*5113495bSYour Name 			}
3059*5113495bSYour Name 		} else {
3060*5113495bSYour Name 			rxdma_error_code = HAL_RXDMA_ERR_WAR;
3061*5113495bSYour Name 		}
3062*5113495bSYour Name 
3063*5113495bSYour Name 		/*
3064*5113495bSYour Name 		 * Store the current link buffer into to the local structure
3065*5113495bSYour Name 		 * to be used for release purpose.
3066*5113495bSYour Name 		 */
3067*5113495bSYour Name 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3068*5113495bSYour Name 					     buf_info.paddr, buf_info.sw_cookie,
3069*5113495bSYour Name 					     buf_info.rbm);
3070*5113495bSYour Name 
3071*5113495bSYour Name 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3072*5113495bSYour Name 					      &buf_info);
3073*5113495bSYour Name 		dp_rx_link_desc_return_by_addr(soc,
3074*5113495bSYour Name 					       (hal_buff_addrinfo_t)
3075*5113495bSYour Name 						rx_link_buf_info,
3076*5113495bSYour Name 						bm_action);
3077*5113495bSYour Name 	} while (buf_info.paddr);
3078*5113495bSYour Name 
3079*5113495bSYour Name 	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
3080*5113495bSYour Name 	if (pdev)
3081*5113495bSYour Name 		DP_STATS_INC(pdev, err.rxdma_error, 1);
3082*5113495bSYour Name 
3083*5113495bSYour Name 	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3084*5113495bSYour Name 		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3085*5113495bSYour Name 	}
3086*5113495bSYour Name 
3087*5113495bSYour Name 	return rx_bufs_used;
3088*5113495bSYour Name }
3089*5113495bSYour Name 
3090*5113495bSYour Name uint32_t
dp_rxdma_err_process(struct dp_intr * int_ctx,struct dp_soc * soc,uint32_t mac_id,uint32_t quota)3091*5113495bSYour Name dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3092*5113495bSYour Name 		     uint32_t mac_id, uint32_t quota)
3093*5113495bSYour Name {
3094*5113495bSYour Name 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3095*5113495bSYour Name 	hal_rxdma_desc_t rxdma_dst_ring_desc;
3096*5113495bSYour Name 	hal_soc_handle_t hal_soc;
3097*5113495bSYour Name 	void *err_dst_srng;
3098*5113495bSYour Name 	union dp_rx_desc_list_elem_t *head = NULL;
3099*5113495bSYour Name 	union dp_rx_desc_list_elem_t *tail = NULL;
3100*5113495bSYour Name 	struct dp_srng *dp_rxdma_srng;
3101*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
3102*5113495bSYour Name 	uint32_t work_done = 0;
3103*5113495bSYour Name 	uint32_t rx_bufs_used = 0;
3104*5113495bSYour Name 
3105*5113495bSYour Name 	if (!pdev)
3106*5113495bSYour Name 		return 0;
3107*5113495bSYour Name 
3108*5113495bSYour Name 	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3109*5113495bSYour Name 
3110*5113495bSYour Name 	if (!err_dst_srng) {
3111*5113495bSYour Name 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3112*5113495bSYour Name 			      soc, err_dst_srng);
3113*5113495bSYour Name 		return 0;
3114*5113495bSYour Name 	}
3115*5113495bSYour Name 
3116*5113495bSYour Name 	hal_soc = soc->hal_soc;
3117*5113495bSYour Name 
3118*5113495bSYour Name 	qdf_assert(hal_soc);
3119*5113495bSYour Name 
3120*5113495bSYour Name 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3121*5113495bSYour Name 		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3122*5113495bSYour Name 			      soc, err_dst_srng);
3123*5113495bSYour Name 		return 0;
3124*5113495bSYour Name 	}
3125*5113495bSYour Name 
3126*5113495bSYour Name 	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3127*5113495bSYour Name 		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3128*5113495bSYour Name 
3129*5113495bSYour Name 			rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3130*5113495bSYour Name 						rxdma_dst_ring_desc,
3131*5113495bSYour Name 						&head, &tail);
3132*5113495bSYour Name 	}
3133*5113495bSYour Name 
3134*5113495bSYour Name 	dp_srng_access_end(int_ctx, soc, err_dst_srng);
3135*5113495bSYour Name 
3136*5113495bSYour Name 	if (rx_bufs_used) {
3137*5113495bSYour Name 		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3138*5113495bSYour Name 			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3139*5113495bSYour Name 			rx_desc_pool = &soc->rx_desc_buf[mac_id];
3140*5113495bSYour Name 		} else {
3141*5113495bSYour Name 			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3142*5113495bSYour Name 			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3143*5113495bSYour Name 		}
3144*5113495bSYour Name 
3145*5113495bSYour Name 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3146*5113495bSYour Name 			rx_desc_pool, rx_bufs_used, &head, &tail, false);
3147*5113495bSYour Name 
3148*5113495bSYour Name 		work_done += rx_bufs_used;
3149*5113495bSYour Name 	}
3150*5113495bSYour Name 
3151*5113495bSYour Name 	return work_done;
3152*5113495bSYour Name }
3153*5113495bSYour Name 
3154*5113495bSYour Name #ifndef QCA_HOST_MODE_WIFI_DISABLED
3155*5113495bSYour Name 
3156*5113495bSYour Name static inline void
dp_wbm_int_err_mpdu_pop(struct dp_soc * soc,uint32_t mac_id,hal_rxdma_desc_t rxdma_dst_ring_desc,union dp_rx_desc_list_elem_t ** head,union dp_rx_desc_list_elem_t ** tail,uint32_t * rx_bufs_used)3157*5113495bSYour Name dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3158*5113495bSYour Name 			hal_rxdma_desc_t rxdma_dst_ring_desc,
3159*5113495bSYour Name 			union dp_rx_desc_list_elem_t **head,
3160*5113495bSYour Name 			union dp_rx_desc_list_elem_t **tail,
3161*5113495bSYour Name 			uint32_t *rx_bufs_used)
3162*5113495bSYour Name {
3163*5113495bSYour Name 	void *rx_msdu_link_desc;
3164*5113495bSYour Name 	qdf_nbuf_t msdu;
3165*5113495bSYour Name 	qdf_nbuf_t last;
3166*5113495bSYour Name 	struct hal_rx_msdu_list msdu_list;
3167*5113495bSYour Name 	uint16_t num_msdus;
3168*5113495bSYour Name 	struct hal_buf_info buf_info;
3169*5113495bSYour Name 	uint32_t msdu_cnt, i;
3170*5113495bSYour Name 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3171*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
3172*5113495bSYour Name 	struct dp_rx_desc *rx_desc;
3173*5113495bSYour Name 
3174*5113495bSYour Name 	msdu = 0;
3175*5113495bSYour Name 
3176*5113495bSYour Name 	last = NULL;
3177*5113495bSYour Name 
3178*5113495bSYour Name 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3179*5113495bSYour Name 				     &buf_info, &msdu_cnt);
3180*5113495bSYour Name 
3181*5113495bSYour Name 	do {
3182*5113495bSYour Name 		rx_msdu_link_desc =
3183*5113495bSYour Name 			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3184*5113495bSYour Name 
3185*5113495bSYour Name 		if (!rx_msdu_link_desc) {
3186*5113495bSYour Name 			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
3187*5113495bSYour Name 			break;
3188*5113495bSYour Name 		}
3189*5113495bSYour Name 
3190*5113495bSYour Name 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3191*5113495bSYour Name 				     &msdu_list, &num_msdus);
3192*5113495bSYour Name 
3193*5113495bSYour Name 		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3194*5113495bSYour Name 			for (i = 0; i < num_msdus; i++) {
3195*5113495bSYour Name 				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3196*5113495bSYour Name 					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3197*5113495bSYour Name 							  msdu_list.sw_cookie[i]);
3198*5113495bSYour Name 					continue;
3199*5113495bSYour Name 				}
3200*5113495bSYour Name 
3201*5113495bSYour Name 				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3202*5113495bSYour Name 							soc,
3203*5113495bSYour Name 							msdu_list.sw_cookie[i]);
3204*5113495bSYour Name 				qdf_assert_always(rx_desc);
3205*5113495bSYour Name 				rx_desc_pool =
3206*5113495bSYour Name 					&soc->rx_desc_buf[rx_desc->pool_id];
3207*5113495bSYour Name 				msdu = rx_desc->nbuf;
3208*5113495bSYour Name 
3209*5113495bSYour Name 				/*
3210*5113495bSYour Name 				 * this is a unlikely scenario where the host is reaping
3211*5113495bSYour Name 				 * a descriptor which it already reaped just a while ago
3212*5113495bSYour Name 				 * but is yet to replenish it back to HW.
3213*5113495bSYour Name 				 */
3214*5113495bSYour Name 				if (qdf_unlikely(!rx_desc->in_use) ||
3215*5113495bSYour Name 				    qdf_unlikely(!msdu)) {
3216*5113495bSYour Name 					dp_rx_err_info_rl("Reaping rx_desc not in use!");
3217*5113495bSYour Name 					continue;
3218*5113495bSYour Name 				}
3219*5113495bSYour Name 
3220*5113495bSYour Name 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
3221*5113495bSYour Name 				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3222*5113495bSYour Name 				rx_desc->unmapped = 1;
3223*5113495bSYour Name 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3224*5113495bSYour Name 
3225*5113495bSYour Name 				dp_rx_buffer_pool_nbuf_free(soc, msdu,
3226*5113495bSYour Name 							    rx_desc->pool_id);
3227*5113495bSYour Name 				rx_bufs_used[rx_desc->pool_id]++;
3228*5113495bSYour Name 				dp_rx_add_to_free_desc_list(head,
3229*5113495bSYour Name 							    tail, rx_desc);
3230*5113495bSYour Name 			}
3231*5113495bSYour Name 		}
3232*5113495bSYour Name 
3233*5113495bSYour Name 		/*
3234*5113495bSYour Name 		 * Store the current link buffer into to the local structure
3235*5113495bSYour Name 		 * to be used for release purpose.
3236*5113495bSYour Name 		 */
3237*5113495bSYour Name 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3238*5113495bSYour Name 					     buf_info.paddr, buf_info.sw_cookie,
3239*5113495bSYour Name 					     buf_info.rbm);
3240*5113495bSYour Name 
3241*5113495bSYour Name 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3242*5113495bSYour Name 					      &buf_info);
3243*5113495bSYour Name 		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3244*5113495bSYour Name 					rx_link_buf_info,
3245*5113495bSYour Name 				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3246*5113495bSYour Name 	} while (buf_info.paddr);
3247*5113495bSYour Name }
3248*5113495bSYour Name 
3249*5113495bSYour Name void
dp_handle_wbm_internal_error(struct dp_soc * soc,void * hal_desc,uint32_t buf_type)3250*5113495bSYour Name dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3251*5113495bSYour Name 			     uint32_t buf_type)
3252*5113495bSYour Name {
3253*5113495bSYour Name 	struct hal_buf_info buf_info = {0};
3254*5113495bSYour Name 	struct dp_rx_desc *rx_desc = NULL;
3255*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
3256*5113495bSYour Name 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3257*5113495bSYour Name 	union dp_rx_desc_list_elem_t *head = NULL;
3258*5113495bSYour Name 	union dp_rx_desc_list_elem_t *tail = NULL;
3259*5113495bSYour Name 	uint8_t pool_id;
3260*5113495bSYour Name 	uint8_t mac_id;
3261*5113495bSYour Name 
3262*5113495bSYour Name 	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3263*5113495bSYour Name 
3264*5113495bSYour Name 	if (!buf_info.paddr) {
3265*5113495bSYour Name 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3266*5113495bSYour Name 		return;
3267*5113495bSYour Name 	}
3268*5113495bSYour Name 
3269*5113495bSYour Name 	/* buffer_addr_info is the first element of ring_desc */
3270*5113495bSYour Name 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3271*5113495bSYour Name 				  &buf_info);
3272*5113495bSYour Name 
3273*5113495bSYour Name 	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3274*5113495bSYour Name 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3275*5113495bSYour Name 		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3276*5113495bSYour Name 							soc,
3277*5113495bSYour Name 							buf_info.sw_cookie);
3278*5113495bSYour Name 
3279*5113495bSYour Name 		if (rx_desc && rx_desc->nbuf) {
3280*5113495bSYour Name 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3281*5113495bSYour Name 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
3282*5113495bSYour Name 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3283*5113495bSYour Name 					      rx_desc->nbuf);
3284*5113495bSYour Name 			rx_desc->unmapped = 1;
3285*5113495bSYour Name 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3286*5113495bSYour Name 
3287*5113495bSYour Name 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3288*5113495bSYour Name 						    rx_desc->pool_id);
3289*5113495bSYour Name 			dp_rx_add_to_free_desc_list(&head,
3290*5113495bSYour Name 						    &tail,
3291*5113495bSYour Name 						    rx_desc);
3292*5113495bSYour Name 
3293*5113495bSYour Name 			rx_bufs_reaped[rx_desc->pool_id]++;
3294*5113495bSYour Name 		}
3295*5113495bSYour Name 	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3296*5113495bSYour Name 		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3297*5113495bSYour Name 
3298*5113495bSYour Name 		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3299*5113495bSYour Name 					&head, &tail, rx_bufs_reaped);
3300*5113495bSYour Name 	}
3301*5113495bSYour Name 
3302*5113495bSYour Name 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3303*5113495bSYour Name 		struct rx_desc_pool *rx_desc_pool;
3304*5113495bSYour Name 		struct dp_srng *dp_rxdma_srng;
3305*5113495bSYour Name 
3306*5113495bSYour Name 		if (!rx_bufs_reaped[mac_id])
3307*5113495bSYour Name 			continue;
3308*5113495bSYour Name 
3309*5113495bSYour Name 		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3310*5113495bSYour Name 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3311*5113495bSYour Name 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
3312*5113495bSYour Name 
3313*5113495bSYour Name 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3314*5113495bSYour Name 					rx_desc_pool,
3315*5113495bSYour Name 					rx_bufs_reaped[mac_id],
3316*5113495bSYour Name 					&head, &tail, false);
3317*5113495bSYour Name 	}
3318*5113495bSYour Name }
3319*5113495bSYour Name 
3320*5113495bSYour Name #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3321