/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#ifndef RX_DEFRAG_DO_NOT_REINJECT
#ifndef DP_BE_WAR
#include "li/hal_li_rx.h"
#endif
#endif
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_internal.h"
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "dp_ipa.h"
#include "dp_rx_buffer_pool.h"

const struct dp_rx_defrag_cipher dp_f_ccmp = {
	"AES-CCM",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_MICLEN,
	0,
};

const struct dp_rx_defrag_cipher dp_f_tkip = {
	"TKIP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
	IEEE80211_WEP_CRCLEN,
	IEEE80211_WEP_MICLEN,
};

const struct dp_rx_defrag_cipher dp_f_wep = {
	"WEP",
	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
	IEEE80211_WEP_CRCLEN,
	0,
};

/*
 * The header and MIC lengths are the same for both
 * GCMP-128 and GCMP-256.
 */
const struct dp_rx_defrag_cipher dp_f_gcmp = {
	"AES-GCMP",
	WLAN_IEEE80211_GCMP_HEADERLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
	WLAN_IEEE80211_GCMP_MICLEN,
};

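/*
 * How the dp_rx_defrag_cipher parameters above are consumed: each
 * initializer lists, in order, the cipher name, the length of the crypto
 * header that follows the 802.11 header (IV/KeyID/ExtIV), the trailer
 * length trimmed from the tail of each fragment (ICV/MIC), and the
 * Michael MIC length handled separately during reassembly. A TKIP
 * fragment, for example, is laid out as:
 *
 *   [rx TLVs][802.11 header][IV + ExtIV (8)][payload][ICV (4)]
 *
 * and the defrag path below trims dp_f_tkip.ic_trailer per fragment in
 * dp_rx_defrag_tkip_decap() and strips/verifies dp_f_tkip.ic_miclen
 * bytes of Michael MIC in dp_rx_defrag_tkip_demic().
 */
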
/**
 * dp_rx_defrag_frames_free() - Free fragment chain
 * @frames: Fragment chain
 *
 * Iterates through the fragment chain and frees each fragment
 *
 * Return: None
 */
static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
{
	qdf_nbuf_t next, frag = frames;

	while (frag) {
		next = qdf_nbuf_next(frag);
		dp_rx_nbuf_free(frag);
		frag = next;
	}
}

#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
/**
 * dp_rx_clear_saved_desc_info() - Clears descriptor info
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Transmit ID (TID)
 *
 * Clears the saved MPDU descriptor info and MSDU link pointer that were
 * cached from the REO ring descriptor. The cache is maintained per peer,
 * per TID
 *
 * Return: None
 */
static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
					unsigned int tid)
{
	if (txrx_peer->rx_tid[tid].dst_ring_desc)
		qdf_mem_free(txrx_peer->rx_tid[tid].dst_ring_desc);

	txrx_peer->rx_tid[tid].dst_ring_desc = NULL;
	txrx_peer->rx_tid[tid].head_frag_desc = NULL;
}

/**
 * dp_rx_return_head_frag_desc() - Return the saved head fragment descriptor
 * @txrx_peer: Pointer to the peer data structure
 * @tid: Transmit ID (TID)
 *
 * Returns the RX descriptor saved for the head fragment to the RXDMA
 * refill ring and releases the saved REO destination ring (link)
 * descriptor, if any.
 *
 * Return: None
 */
static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
					unsigned int tid)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	pdev = txrx_peer->vdev->pdev;
	soc = pdev->soc;

	if (txrx_peer->rx_tid[tid].head_frag_desc) {
		pool_id = txrx_peer->rx_tid[tid].head_frag_desc->pool_id;
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_add_to_free_desc_list(&head, &tail,
					    txrx_peer->rx_tid[tid].head_frag_desc);
		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					1, &head, &tail, false);
	}

	if (txrx_peer->rx_tid[tid].dst_ring_desc) {
		if (dp_rx_link_desc_return(soc,
					   txrx_peer->rx_tid[tid].dst_ring_desc,
					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
		    QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to return link desc", __func__);
	}
}
#else

static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
					unsigned int tid)
{
}

static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
					unsigned int tid)
{
}
#endif /* WLAN_SOFTUMAC_SUPPORT */

void dp_rx_reorder_flush_frag(struct dp_txrx_peer *txrx_peer,
			      unsigned int tid)
{
	dp_info_rl("Flushing TID %d", tid);

	if (!txrx_peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"%s: NULL peer", __func__);
		return;
	}

	dp_rx_return_head_frag_desc(txrx_peer, tid);
	dp_rx_defrag_cleanup(txrx_peer, tid);
}

void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
{
	struct dp_rx_tid_defrag *waitlist_elem = NULL;
	struct dp_rx_tid_defrag *tmp;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	TAILQ_HEAD(, dp_rx_tid_defrag) temp_list;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	TAILQ_INIT(&temp_list);

	dp_debug("Current time  %u", now_ms);

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(waitlist_elem, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		uint32_t tid;

		if (waitlist_elem->defrag_timeout_ms > now_ms)
			break;

		tid = waitlist_elem->tid;
		if (tid >= DP_MAX_TIDS) {
			qdf_assert(0);
			continue;
		}

		TAILQ_REMOVE(&soc->rx.defrag.waitlist, waitlist_elem,
			     defrag_waitlist_elem);
		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);

		/* Move to temp list and clean-up later */
		TAILQ_INSERT_TAIL(&temp_list, waitlist_elem,
				  defrag_waitlist_elem);
	}
	if (waitlist_elem) {
		soc->rx.defrag.next_flush_ms =
			waitlist_elem->defrag_timeout_ms;
	} else {
		soc->rx.defrag.next_flush_ms =
			now_ms + soc->rx.defrag.timeout_ms;
	}

	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);

	TAILQ_FOREACH_SAFE(waitlist_elem, &temp_list,
			   defrag_waitlist_elem, tmp) {
		struct dp_txrx_peer *txrx_peer, *temp_peer = NULL;

		qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
		TAILQ_REMOVE(&temp_list, waitlist_elem,
			     defrag_waitlist_elem);
		/* get address of current peer */
		txrx_peer = waitlist_elem->defrag_peer;
		qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);

		temp_peer = dp_txrx_peer_get_ref_by_id(soc, txrx_peer->peer_id,
						       &txrx_ref_handle,
						       DP_MOD_ID_RX_ERR);
		if (temp_peer == txrx_peer) {
			qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
			dp_rx_reorder_flush_frag(txrx_peer, waitlist_elem->tid);
			qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
		}

		if (temp_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);

	}
}

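/*
 * dp_rx_defrag_waitlist_flush() walks the soc-level waitlist (kept in
 * timeout order), flushes every TID whose defrag timeout has expired and
 * re-arms soc->rx.defrag.next_flush_ms. A caller sketch, assuming a
 * periodic service context outside this file:
 *
 *	now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
 *	if (now_ms > soc->rx.defrag.next_flush_ms)
 *		dp_rx_defrag_waitlist_flush(soc);
 */
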
void dp_rx_defrag_waitlist_add(struct dp_txrx_peer *txrx_peer,
			       unsigned int tid)
{
	struct dp_soc *psoc = txrx_peer->vdev->pdev->soc;
	struct dp_rx_tid_defrag *waitlist_elem = &txrx_peer->rx_tid[tid];

	dp_debug("Adding TID %u to waitlist for peer %pK with peer_id = %d ",
		 tid, txrx_peer, txrx_peer->peer_id);

	/* TODO: use LIST macros instead of TAIL macros */
	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
		psoc->rx.defrag.next_flush_ms =
			waitlist_elem->defrag_timeout_ms;

	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, waitlist_elem,
			  defrag_waitlist_elem);
	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
}

void dp_rx_defrag_waitlist_remove(struct dp_txrx_peer *txrx_peer,
				  unsigned int tid)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_rx_tid_defrag *waitlist_elm;
	struct dp_rx_tid_defrag *tmp;

	dp_debug("Removing TID %u from waitlist for peer %pK peer_id = %d ",
		 tid, txrx_peer, txrx_peer->peer_id);

	if (tid >= DP_MAX_TIDS) {
		dp_err("TID out of bounds: %d", tid);
		qdf_assert_always(0);
	}

	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
	TAILQ_FOREACH_SAFE(waitlist_elm, &soc->rx.defrag.waitlist,
			   defrag_waitlist_elem, tmp) {
		struct dp_txrx_peer *peer_on_waitlist;

		/* get address of current peer */
		peer_on_waitlist = waitlist_elm->defrag_peer;

		/* Ensure it is TID for same peer */
		if (peer_on_waitlist == txrx_peer && waitlist_elm->tid == tid) {
			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
				     waitlist_elm, defrag_waitlist_elem);
			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
		}
	}
	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
}

QDF_STATUS
dp_rx_defrag_fraglist_insert(struct dp_txrx_peer *txrx_peer, unsigned int tid,
			     qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr,
			     qdf_nbuf_t frag, uint8_t *all_frag_present)
{
	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
	qdf_nbuf_t next;
	qdf_nbuf_t prev = NULL;
	qdf_nbuf_t cur;
	uint16_t head_fragno, cur_fragno, next_fragno;
	uint8_t last_morefrag = 1, count = 0;
	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
	uint8_t *rx_desc_info;

	qdf_assert(frag);
	qdf_assert(head_addr);
	qdf_assert(tail_addr);

	*all_frag_present = 0;
	rx_desc_info = qdf_nbuf_data(frag);
	cur_fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc_info);

	dp_debug("cur_fragno %d", cur_fragno);
	/* If this is the first fragment */
	if (!(*head_addr)) {
		*head_addr = *tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;

		goto insert_done;
	}

	/* In sequence fragment */
	if (cur_fragno > rx_tid->curr_frag_num) {
		qdf_nbuf_set_next(*tail_addr, frag);
		*tail_addr = frag;
		qdf_nbuf_set_next(*tail_addr, NULL);
		rx_tid->curr_frag_num = cur_fragno;
	} else {
		/* Out of sequence fragment */
		cur = *head_addr;
		rx_desc_info = qdf_nbuf_data(cur);
		head_fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							      rx_desc_info);

		if (cur_fragno == head_fragno) {
			dp_rx_nbuf_free(frag);
			goto insert_fail;
		} else if (head_fragno > cur_fragno) {
			qdf_nbuf_set_next(frag, cur);
			cur = frag;
			*head_addr = frag; /* head pointer to be updated */
		} else {
			while ((cur_fragno > head_fragno) && cur) {
				prev = cur;
				cur = qdf_nbuf_next(cur);
				if (cur) {
					rx_desc_info = qdf_nbuf_data(cur);
					head_fragno =
						dp_rx_frag_get_mpdu_frag_number(
								soc,
								rx_desc_info);
				}
			}

			if (cur_fragno == head_fragno) {
				dp_rx_nbuf_free(frag);
				goto insert_fail;
			}

			qdf_nbuf_set_next(prev, frag);
			qdf_nbuf_set_next(frag, cur);
		}
	}

	next = qdf_nbuf_next(*head_addr);

	rx_desc_info = qdf_nbuf_data(*tail_addr);
	last_morefrag = dp_rx_frag_get_more_frag_bit(soc, rx_desc_info);

	/* TODO: optimize the loop */
	if (!last_morefrag) {
		/* Check if all fragments are present */
		do {
			rx_desc_info = qdf_nbuf_data(next);
			next_fragno =
				dp_rx_frag_get_mpdu_frag_number(soc,
								rx_desc_info);
			count++;

			if (next_fragno != count)
				break;

			next = qdf_nbuf_next(next);
		} while (next);

		if (!next) {
			*all_frag_present = 1;
			return QDF_STATUS_SUCCESS;
		} else {
			/* revisit */
		}
	}

insert_done:
	return QDF_STATUS_SUCCESS;

insert_fail:
	return QDF_STATUS_E_FAILURE;
}

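/*
 * Ordering example for dp_rx_defrag_fraglist_insert(): fragments whose
 * 802.11 fragment numbers arrive as 0, 2, 1, 3 end up chained as
 * 0 -> 1 -> 2 -> 3. A duplicate fragment number is freed and the call
 * returns QDF_STATUS_E_FAILURE. *all_frag_present is set only once the
 * tail fragment has the more-fragments bit clear and the chain after the
 * head has no holes.
 */
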
/**
 * dp_rx_defrag_tkip_decap() - decap tkip encrypted fragment
 * @soc: DP SOC
 * @msdu: Pointer to the fragment
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * decap tkip encrypted fragment
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_tkip_decap(struct dp_soc *soc,
			qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of 802.11 header info */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);

	/* TKIP header is located post 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_defrag_ccmp_demic() - Remove MIC information from CCMP fragment
 * @soc: DP SOC
 * @nbuf: Pointer to the fragment buffer
 * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
 *
 * Remove MIC information from CCMP fragment
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_ccmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *orig_hdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	/* start of the 802.11 header */
	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);

	/* CCMP header is located after 802.11 header */
	ivp = orig_hdr + hdrlen;
	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_defrag_ccmp_decap() - decap CCMP encrypted fragment
 * @soc: DP SOC
 * @nbuf: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap CCMP encrypted fragment
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_ccmp_decap(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
{
	uint8_t *ivp, *origHdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
	ivp = origHdr + hdrlen;

	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_defrag_wep_decap() - decap WEP encrypted fragment
 * @soc: DP SOC
 * @msdu: Pointer to the fragment
 * @hdrlen: length of the header information
 *
 * decap WEP encrypted fragment
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_defrag_wep_decap(struct dp_soc *soc, qdf_nbuf_t msdu, uint16_t hdrlen)
{
	uint8_t *origHdr;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);

	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);

	return QDF_STATUS_SUCCESS;
}

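/*
 * Note on dp_rx_defrag_wep_decap(): the qdf_mem_move() above copies the
 * 802.11 header forward by dp_f_wep.ic_header (IV + KeyID, 4 bytes) so
 * that it overwrites the WEP crypto header, and the 4-byte ICV is then
 * trimmed from the tail:
 *
 *   before: [rx TLVs][802.11 header][IV|KeyID][payload][ICV]
 *   after : [rx TLVs][stale bytes][802.11 header][payload]
 */
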
/**
 * dp_rx_defrag_hdrsize() - Calculate the header size of the received fragment
 * @soc: soc handle
 * @nbuf: Pointer to the fragment
 *
 * Calculate the header size of the received fragment
 *
 * Return: header size (uint16_t)
 */
static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
	uint16_t size = sizeof(struct ieee80211_frame);
	uint16_t fc = 0;
	uint32_t to_ds, fr_ds;
	uint8_t frm_ctrl_valid;
	uint16_t frm_ctrl_field;

	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
	frm_ctrl_valid =
		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						    rx_tlv_hdr);
	frm_ctrl_field = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_tlv_hdr);

	if (to_ds && fr_ds)
		size += QDF_MAC_ADDR_SIZE;

	if (frm_ctrl_valid) {
		fc = frm_ctrl_field;

		/* use the first byte for validation */
		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
			size += sizeof(uint16_t);
			/* use the second byte for validation */
			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
				size += sizeof(struct ieee80211_htc);
		}
	}

	return size;
}

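/*
 * Worked example for dp_rx_defrag_hdrsize(): a 4-address QoS data frame
 * (to_ds && fr_ds) carrying an HT Control field yields
 *   24 (struct ieee80211_frame) + 6 (addr4) + 2 (QoS control)
 *   + 4 (struct ieee80211_htc) = 36 bytes,
 * while a plain non-QoS, 3-address frame stays at 24 bytes.
 */
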
/**
 * dp_rx_defrag_michdr() - Calculate a pseudo MIC header
 * @wh0: Pointer to the wireless header of the fragment
 * @hdr: Array to hold the pseudo header
 *
 * Calculate a pseudo MIC header
 *
 * Return: None
 */
static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
				uint8_t hdr[])
{
	const struct ieee80211_frame_addr4 *wh =
		(const struct ieee80211_frame_addr4 *)wh0;

	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
					   wh->i_addr4);
		break;
	}

	/*
	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for data frame, but
	 * it could also be set for deauth, disassoc, action, etc. for
	 * a mgt type frame. It comes into picture for MFP.
	 */
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
				IEEE80211_FC1_DIR_DSTODS) {
			const struct ieee80211_qosframe_addr4 *qwh =
				(const struct ieee80211_qosframe_addr4 *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		} else {
			const struct ieee80211_qosframe *qwh =
				(const struct ieee80211_qosframe *)wh;
			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
		}
	} else {
		hdr[12] = 0;
	}

	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
}

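/*
 * Layout of the 16-byte Michael pseudo header built by
 * dp_rx_defrag_michdr():
 *
 *   hdr[0..5]   DA (selected per the ToDS/FromDS bits)
 *   hdr[6..11]  SA
 *   hdr[12]     QoS TID (0 for non-QoS frames)
 *   hdr[13..15] reserved, always 0
 */
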
/**
 * dp_rx_defrag_mic() - Calculate the Michael MIC
 * @soc: DP SOC
 * @key: Pointer to the key
 * @wbuf: fragment buffer
 * @off: Offset
 * @data_len: Data length
 * @mic: Array to hold MIC
 *
 * Calculate the Michael MIC over the pseudo header and the fragment data
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_mic(struct dp_soc *soc, const uint8_t *key,
				   qdf_nbuf_t wbuf, uint16_t off,
				   uint16_t data_len, uint8_t mic[])
{
	uint8_t hdr[16] = { 0, };
	uint32_t l, r;
	const uint8_t *data;
	uint32_t space;
	int rx_desc_len = soc->rx_pkt_tlv_size;

	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
		+ rx_desc_len), hdr);

	l = dp_rx_get_le32(key);
	r = dp_rx_get_le32(key + 4);

	/* Michael MIC pseudo header: DA, SA, Priority, 3 x 0 */
	l ^= dp_rx_get_le32(hdr);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[4]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[8]);
	dp_rx_michael_block(l, r);
	l ^= dp_rx_get_le32(&hdr[12]);
	dp_rx_michael_block(l, r);

	/* first buffer has special handling */
	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
	space = qdf_nbuf_len(wbuf) - off;

	for (;; ) {
		if (space > data_len)
			space = data_len;

		/* collect 32-bit blocks from current buffer */
		while (space >= sizeof(uint32_t)) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += sizeof(uint32_t);
			space -= sizeof(uint32_t);
			data_len -= sizeof(uint32_t);
		}
		if (data_len < sizeof(uint32_t))
			break;

		wbuf = qdf_nbuf_next(wbuf);
		if (!wbuf)
			return QDF_STATUS_E_DEFRAG_ERROR;

		if (space != 0) {
			const uint8_t *data_next;
			/*
			 * Block straddles buffers, split references.
			 */
			data_next =
				(uint8_t *)qdf_nbuf_data(wbuf) + off;
			if ((qdf_nbuf_len(wbuf)) <
				sizeof(uint32_t) - space) {
				return QDF_STATUS_E_DEFRAG_ERROR;
			}
			switch (space) {
			case 1:
				l ^= dp_rx_get_le32_split(data[0],
					data_next[0], data_next[1],
					data_next[2]);
				data = data_next + 3;
				space = (qdf_nbuf_len(wbuf) - off) - 3;
				break;
			case 2:
				l ^= dp_rx_get_le32_split(data[0], data[1],
						    data_next[0], data_next[1]);
				data = data_next + 2;
				space = (qdf_nbuf_len(wbuf) - off) - 2;
				break;
			case 3:
				l ^= dp_rx_get_le32_split(data[0], data[1],
					data[2], data_next[0]);
				data = data_next + 1;
				space = (qdf_nbuf_len(wbuf) - off) - 1;
				break;
			}
			dp_rx_michael_block(l, r);
			data_len -= sizeof(uint32_t);
		} else {
			/*
			 * Setup for next buffer.
			 */
			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
			space = qdf_nbuf_len(wbuf) - off;
		}
	}
	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (data_len) {
	case 0:
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		break;
	case 1:
		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
		break;
	case 2:
		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
		break;
	case 3:
		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
		break;
	}
	dp_rx_michael_block(l, r);
	dp_rx_michael_block(l, r);
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_defrag_tkip_demic() - Remove MIC header from the TKIP frame
 * @soc: DP SOC
 * @key: Pointer to the key
 * @msdu: fragment buffer
 * @hdrlen: Length of the header information
 *
 * Remove the Michael MIC from the TKIP frame and verify it against the
 * MIC computed over the reassembled payload
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_rx_defrag_tkip_demic(struct dp_soc *soc,
					  const uint8_t *key,
					  qdf_nbuf_t msdu, uint16_t hdrlen)
{
	QDF_STATUS status;
	uint32_t pktlen = 0, prev_data_len;
	uint8_t mic[IEEE80211_WEP_MICLEN];
	uint8_t mic0[IEEE80211_WEP_MICLEN];
	qdf_nbuf_t prev = NULL, prev0, next;
	uint8_t len0 = 0;

	next = msdu;
	prev0 = msdu;
	while (next) {
		pktlen += (qdf_nbuf_len(next) - hdrlen);
		prev = next;
		dp_debug("pktlen %u",
			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
		next = qdf_nbuf_next(next);
		if (next && !qdf_nbuf_next(next))
			prev0 = prev;
	}

	if (!prev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Defrag chaining failed !\n", __func__);
		return QDF_STATUS_E_DEFRAG_ERROR;
	}

	prev_data_len = qdf_nbuf_len(prev) - hdrlen;
	if (prev_data_len < dp_f_tkip.ic_miclen) {
		if (prev0 == prev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Fragments don't have MIC header !\n", __func__);
			return QDF_STATUS_E_DEFRAG_ERROR;
		}
		len0 = dp_f_tkip.ic_miclen - (uint8_t)prev_data_len;
		qdf_nbuf_copy_bits(prev0, qdf_nbuf_len(prev0) - len0, len0,
				   (caddr_t)mic0);
		qdf_nbuf_trim_tail(prev0, len0);
	}

	qdf_nbuf_copy_bits(prev, (qdf_nbuf_len(prev) -
			   (dp_f_tkip.ic_miclen - len0)),
			   (dp_f_tkip.ic_miclen - len0),
			   (caddr_t)(&mic0[len0]));
	qdf_nbuf_trim_tail(prev, (dp_f_tkip.ic_miclen - len0));
	pktlen -= dp_f_tkip.ic_miclen;

	if (((qdf_nbuf_len(prev) - hdrlen) == 0) && prev != msdu) {
		dp_rx_nbuf_free(prev);
		qdf_nbuf_set_next(prev0, NULL);
	}

	status = dp_rx_defrag_mic(soc, key, msdu, hdrlen,
				  pktlen, mic);

	if (QDF_IS_STATUS_ERROR(status))
		return status;

	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

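/*
 * dp_rx_defrag_tkip_demic() also covers the case where the 8-byte
 * Michael MIC straddles the last two fragments: the leading len0 MIC
 * bytes are copied out of (and trimmed from) the second-to-last nbuf
 * (prev0) and the rest from the last nbuf (prev); the MIC is then
 * recomputed over the reassembled payload and any mismatch is reported
 * as QDF_STATUS_E_DEFRAG_ERROR.
 */
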
/**
 * dp_rx_frag_pull_hdr() - Pulls the RXTLV & the 802.11 headers
 * @soc: DP SOC
 * @nbuf: buffer pointer
 * @hdrsize: size of the header to be pulled
 *
 * Pull the RXTLV & the 802.11 headers
 *
 * Return: None
 */
static void dp_rx_frag_pull_hdr(struct dp_soc *soc,
				qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));

	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + hdrsize);

	dp_debug("final pktlen %d .11len %d",
		 (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
}

/**
 * dp_rx_defrag_pn_check() - Check the PN of the current fragment against
 *			     the previous PN
 * @soc: DP SOC
 * @msdu: msdu to get the current PN
 * @cur_pn128: PN extracted from current msdu
 * @prev_pn128: Prev PN
 *
 * Return: 0 on success, non zero on failure
 */
static int dp_rx_defrag_pn_check(struct dp_soc *soc, qdf_nbuf_t msdu,
				 uint64_t *cur_pn128, uint64_t *prev_pn128)
{
	int out_of_order = 0;

	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(msdu), cur_pn128);

	if (cur_pn128[1] == prev_pn128[1])
		out_of_order = (cur_pn128[0] - prev_pn128[0] != 1);
	else
		out_of_order = (cur_pn128[1] - prev_pn128[1] != 1);

	return out_of_order;
}

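/*
 * Example for dp_rx_defrag_pn_check(): with a previous PN of
 * {lo = 0x1000, hi = 0}, a fragment carrying {lo = 0x1001, hi = 0} is
 * accepted as in order, while {lo = 0x1003, hi = 0} or a repeated
 * {lo = 0x1000, hi = 0} is flagged out of order; fragments of one MPDU
 * must carry strictly consecutive packet numbers.
 */
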
/**
 * dp_rx_construct_fraglist() - Construct a nbuf fraglist
 * @txrx_peer: Pointer to the txrx peer
 * @tid: Transmit ID (TID)
 * @head: Pointer to list of fragments
 * @hdrsize: Size of the header to be pulled
 *
 * Construct a nbuf fraglist
 *
 * Return: QDF_STATUS
 */
static int
dp_rx_construct_fraglist(struct dp_txrx_peer *txrx_peer, int tid,
			 qdf_nbuf_t head,
			 uint16_t hdrsize)
{
	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
	qdf_nbuf_t msdu = qdf_nbuf_next(head);
	qdf_nbuf_t rx_nbuf = msdu;
	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
	uint32_t len = 0;
	uint64_t cur_pn128[2] = {0, 0}, prev_pn128[2];
	int out_of_order = 0;
	int index;
	int needs_pn_check = 0;
	enum cdp_sec_type sec_type;

	prev_pn128[0] = rx_tid->pn128[0];
	prev_pn128[1] = rx_tid->pn128[1];

	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu) ? dp_sec_mcast :
				dp_sec_ucast;
	sec_type = txrx_peer->security[index].sec_type;

	if (!(sec_type == cdp_sec_type_none || sec_type == cdp_sec_type_wep128 ||
	      sec_type == cdp_sec_type_wep104 || sec_type == cdp_sec_type_wep40))
		needs_pn_check = 1;

	while (msdu) {
		if (qdf_likely(needs_pn_check))
			out_of_order = dp_rx_defrag_pn_check(soc, msdu,
							     &cur_pn128[0],
							     &prev_pn128[0]);

		if (qdf_unlikely(out_of_order)) {
			dp_info_rl("cur_pn128[0] 0x%llx cur_pn128[1] 0x%llx prev_pn128[0] 0x%llx prev_pn128[1] 0x%llx",
				   cur_pn128[0], cur_pn128[1],
				   prev_pn128[0], prev_pn128[1]);
			return QDF_STATUS_E_FAILURE;
		}

		prev_pn128[0] = cur_pn128[0];
		prev_pn128[1] = cur_pn128[1];

		/*
		 * Broadcast and multicast frames should never be fragmented.
		 * Iterate through all msdus and drop the fragments if even
		 * one of them has a mcast/bcast destination address.
		 */
		if (hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "Dropping multicast/broadcast fragments");
			return QDF_STATUS_E_FAILURE;
		}

		dp_rx_frag_pull_hdr(soc, msdu, hdrsize);
		len += qdf_nbuf_len(msdu);
		msdu = qdf_nbuf_next(msdu);
	}

	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
	qdf_nbuf_set_next(head, NULL);
	qdf_nbuf_set_is_frag(head, 1);

	dp_debug("head len %d ext len %d data len %d ",
		 (uint32_t)qdf_nbuf_len(head),
		 (uint32_t)qdf_nbuf_len(rx_nbuf),
		 (uint32_t)(head->data_len));

	return QDF_STATUS_SUCCESS;
}

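/*
 * Resulting buffer layout after dp_rx_construct_fraglist(): the head
 * nbuf keeps its rx TLVs and 802.11 header, while every continuation
 * fragment has its TLVs and header pulled and is attached to the head
 * as an extension (frag) list:
 *
 *   head: [rx TLVs][802.11 hdr][frag 0 payload]
 *           ext list -> [frag 1 payload] -> [frag 2 payload] -> ...
 *
 * On the way, each continuation fragment's PN is checked against the
 * previous one and any multicast/broadcast fragment aborts reassembly.
 */
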
/**
 * dp_rx_defrag_err() - rx defragmentation error handler
 * @vdev: handle to vdev object
 * @nbuf: packet buffer
 *
 * This function handles rx error and sends MIC error notification
 *
 * Return: None
 */
static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct ol_if_ops *tops = NULL;
	struct dp_pdev *pdev = vdev->pdev;
	int rx_desc_len = pdev->soc->rx_pkt_tlv_size;
	uint8_t *orig_hdr;
	struct ieee80211_frame *wh;
	struct cdp_rx_mic_err_info mic_failure_info;

	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
	wh = (struct ieee80211_frame *)orig_hdr;

	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr1);
	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
			 (struct qdf_mac_addr *)&wh->i_addr2);
	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(wh->i_addr1);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = (uint8_t *)wh;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);
}

/**
 * dp_rx_defrag_nwifi_to_8023() - Transcap 802.11 to 802.3
 * @soc: dp soc handle
 * @txrx_peer: txrx_peer handle
 * @tid: Transmit ID (TID)
 * @nbuf: Pointer to the fragment buffer
 * @hdrsize: Size of headers
 *
 * Transcap the fragment from 802.11 to 802.3
 *
 * Return: None
 */
static void
dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
			   int tid, qdf_nbuf_t nbuf, uint16_t hdrsize)
{
	struct llc_snap_hdr_t *llchdr;
	struct ethernet_hdr_t *eth_hdr;
	uint8_t ether_type[2];
	uint16_t fc = 0;
	union dp_align_mac_addr mac_addr;
	uint8_t *rx_desc_info = qdf_mem_malloc(soc->rx_pkt_tlv_size);
	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
	struct ieee80211_frame_addr4 wh = {0};

	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), rx_tid->pn128);

	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));

	if (!rx_desc_info) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: Memory alloc failed ! ", __func__);
		QDF_ASSERT(0);
		return;
	}

	qdf_mem_zero(&wh, sizeof(struct ieee80211_frame_addr4));
	if (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
		qdf_mem_copy(&wh, qdf_nbuf_data(nbuf) + soc->rx_pkt_tlv_size,
			     hdrsize);

	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), soc->rx_pkt_tlv_size);

	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
					soc->rx_pkt_tlv_size + hdrsize);
	qdf_mem_copy(ether_type, llchdr->ethertype, 2);

	qdf_nbuf_pull_head(nbuf, (soc->rx_pkt_tlv_size + hdrsize +
				  sizeof(struct llc_snap_hdr_t) -
				  sizeof(struct ethernet_hdr_t)));

	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));

	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
						rx_desc_info))
		fc = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_desc_info);

	dp_debug("Frame control type: 0x%x", fc);

	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_TODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		break;

	case IEEE80211_FC1_DIR_DSTODS:
		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
				      &mac_addr.raw[0]);
		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
			QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(eth_hdr->src_addr, &wh.i_addr4[0],
			     QDF_MAC_ADDR_SIZE);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: Unknown frame control type: 0x%x", __func__, fc);
	}

	qdf_mem_copy(eth_hdr->ethertype, ether_type,
			sizeof(ether_type));

	qdf_nbuf_push_head(nbuf, soc->rx_pkt_tlv_size);
	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, soc->rx_pkt_tlv_size);
	qdf_mem_free(rx_desc_info);
}

1109*5113495bSYour Name #ifdef RX_DEFRAG_DO_NOT_REINJECT
1110*5113495bSYour Name /**
1111*5113495bSYour Name  * dp_rx_defrag_deliver() - Deliver defrag packet to stack
1112*5113495bSYour Name  * @txrx_peer: Pointer to the peer
1113*5113495bSYour Name  * @tid: Traffic Identifier
1114*5113495bSYour Name  * @head: Nbuf to be delivered
1115*5113495bSYour Name  *
1116*5113495bSYour Name  * Return: None
1117*5113495bSYour Name  */
1118*5113495bSYour Name static inline void dp_rx_defrag_deliver(struct dp_txrx_peer *txrx_peer,
1119*5113495bSYour Name 					unsigned int tid,
1120*5113495bSYour Name 					qdf_nbuf_t head)
1121*5113495bSYour Name {
1122*5113495bSYour Name 	struct dp_vdev *vdev = txrx_peer->vdev;
1123*5113495bSYour Name 	struct dp_soc *soc = vdev->pdev->soc;
1124*5113495bSYour Name 	qdf_nbuf_t deliver_list_head = NULL;
1125*5113495bSYour Name 	qdf_nbuf_t deliver_list_tail = NULL;
1126*5113495bSYour Name 	uint8_t *rx_tlv_hdr;
1127*5113495bSYour Name 
1128*5113495bSYour Name 	rx_tlv_hdr = qdf_nbuf_data(head);
1129*5113495bSYour Name 
1130*5113495bSYour Name 	QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id;
1131*5113495bSYour Name 	qdf_nbuf_set_tid_val(head, tid);
1132*5113495bSYour Name 	qdf_nbuf_pull_head(head, soc->rx_pkt_tlv_size);
1133*5113495bSYour Name 
1134*5113495bSYour Name 	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
1135*5113495bSYour Name 			  head);
1136*5113495bSYour Name 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, deliver_list_head,
1137*5113495bSYour Name 			       deliver_list_tail);
1138*5113495bSYour Name }
1139*5113495bSYour Name 
1140*5113495bSYour Name /**
1141*5113495bSYour Name  * dp_rx_defrag_reo_reinject() - Deliver the defragmented frame to the stack
1142*5113495bSYour Name  * @txrx_peer: Pointer to the peer
1143*5113495bSYour Name  * @tid: Traffic Identifier
1144*5113495bSYour Name  * @head: Buffer to be delivered
1145*5113495bSYour Name  *
1146*5113495bSYour Name  * Deliver the reassembled frame to the stack instead of reinjecting it into REO
1147*5113495bSYour Name  *
1148*5113495bSYour Name  * Return: QDF_STATUS
1149*5113495bSYour Name  */
1150*5113495bSYour Name static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1151*5113495bSYour Name 					    unsigned int tid, qdf_nbuf_t head)
1152*5113495bSYour Name {
1153*5113495bSYour Name 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1154*5113495bSYour Name 
1155*5113495bSYour Name 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1156*5113495bSYour Name 
1157*5113495bSYour Name 	dp_rx_defrag_deliver(txrx_peer, tid, head);
1158*5113495bSYour Name 	rx_reorder_array_elem->head = NULL;
1159*5113495bSYour Name 	rx_reorder_array_elem->tail = NULL;
1160*5113495bSYour Name 	dp_rx_return_head_frag_desc(txrx_peer, tid);
1161*5113495bSYour Name 
1162*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1163*5113495bSYour Name }
1164*5113495bSYour Name #else
1165*5113495bSYour Name #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1166*5113495bSYour Name /**
1167*5113495bSYour Name  * dp_rx_reinject_ring_record_entry() - Record reinject ring history
1168*5113495bSYour Name  * @soc: Datapath soc structure
1169*5113495bSYour Name  * @paddr: paddr of the buffer reinjected to SW2REO ring
1170*5113495bSYour Name  * @sw_cookie: SW cookie of the buffer reinjected to SW2REO ring
1171*5113495bSYour Name  * @rbm: Return buffer manager of the buffer reinjected to SW2REO ring
1172*5113495bSYour Name  *
1173*5113495bSYour Name  * Return: None
1174*5113495bSYour Name  */
1175*5113495bSYour Name static inline void
1176*5113495bSYour Name dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1177*5113495bSYour Name 				 uint32_t sw_cookie, uint8_t rbm)
1178*5113495bSYour Name {
1179*5113495bSYour Name 	struct dp_buf_info_record *record;
1180*5113495bSYour Name 	uint32_t idx;
1181*5113495bSYour Name 
1182*5113495bSYour Name 	if (qdf_unlikely(!soc->rx_reinject_ring_history))
1183*5113495bSYour Name 		return;
1184*5113495bSYour Name 
1185*5113495bSYour Name 	idx = dp_history_get_next_index(&soc->rx_reinject_ring_history->index,
1186*5113495bSYour Name 					DP_RX_REINJECT_HIST_MAX);
1187*5113495bSYour Name 
1188*5113495bSYour Name 	/* No NULL check needed for record since it's an array */
1189*5113495bSYour Name 	record = &soc->rx_reinject_ring_history->entry[idx];
1190*5113495bSYour Name 
1191*5113495bSYour Name 	record->timestamp = qdf_get_log_timestamp();
1192*5113495bSYour Name 	record->hbi.paddr = paddr;
1193*5113495bSYour Name 	record->hbi.sw_cookie = sw_cookie;
1194*5113495bSYour Name 	record->hbi.rbm = rbm;
1195*5113495bSYour Name }
1196*5113495bSYour Name #else
1197*5113495bSYour Name static inline void
1198*5113495bSYour Name dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1199*5113495bSYour Name 				 uint32_t sw_cookie, uint8_t rbm)
1200*5113495bSYour Name {
1201*5113495bSYour Name }
1202*5113495bSYour Name #endif
1203*5113495bSYour Name 
1204*5113495bSYour Name /**
1205*5113495bSYour Name  * dp_rx_defrag_reo_reinject() - Reinject the fragment chain back into REO
1206*5113495bSYour Name  * @txrx_peer: Pointer to the txrx_peer
1207*5113495bSYour Name  * @tid: Traffic Identifier
1208*5113495bSYour Name  * @head: Buffer to be reinjected back
1209*5113495bSYour Name  *
1210*5113495bSYour Name  * Reinject the fragment chain back into REO
1211*5113495bSYour Name  *
1212*5113495bSYour Name  * Return: QDF_STATUS
1213*5113495bSYour Name  */
1214*5113495bSYour Name static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1215*5113495bSYour Name 					    unsigned int tid, qdf_nbuf_t head)
1216*5113495bSYour Name {
1217*5113495bSYour Name 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
1218*5113495bSYour Name 	struct dp_soc *soc = pdev->soc;
1219*5113495bSYour Name 	struct hal_buf_info buf_info;
1220*5113495bSYour Name 	struct hal_buf_info temp_buf_info;
1221*5113495bSYour Name 	void *link_desc_va;
1222*5113495bSYour Name 	void *msdu0, *msdu_desc_info;
1223*5113495bSYour Name 	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
1224*5113495bSYour Name 	void *dst_mpdu_desc_info;
1225*5113495bSYour Name 	uint64_t dst_qdesc_addr;
1226*5113495bSYour Name 	qdf_dma_addr_t paddr;
1227*5113495bSYour Name 	uint32_t nbuf_len, seq_no, dst_ind;
1228*5113495bSYour Name 	uint32_t ret, cookie;
1229*5113495bSYour Name 	hal_ring_desc_t dst_ring_desc =
1230*5113495bSYour Name 		txrx_peer->rx_tid[tid].dst_ring_desc;
1231*5113495bSYour Name 	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
1232*5113495bSYour Name 	struct dp_rx_desc *rx_desc = txrx_peer->rx_tid[tid].head_frag_desc;
1233*5113495bSYour Name 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1234*5113495bSYour Name 						txrx_peer->rx_tid[tid].array;
1235*5113495bSYour Name 	qdf_nbuf_t nbuf_head;
1236*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool = NULL;
1237*5113495bSYour Name 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(dst_ring_desc);
1238*5113495bSYour Name 	uint8_t rx_defrag_rbm_id = dp_rx_get_defrag_bm_id(soc);
1239*5113495bSYour Name 
1240*5113495bSYour Name 	/* do duplicate link desc address check */
1241*5113495bSYour Name 	dp_rx_link_desc_refill_duplicate_check(
1242*5113495bSYour Name 				soc,
1243*5113495bSYour Name 				&soc->last_op_info.reo_reinject_link_desc,
1244*5113495bSYour Name 				buf_addr_info);
1245*5113495bSYour Name 
1246*5113495bSYour Name 	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
1247*5113495bSYour Name 	if (qdf_unlikely(!nbuf_head)) {
1248*5113495bSYour Name 		dp_err_rl("IPA RX REO reinject failed");
1249*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1250*5113495bSYour Name 	}
1251*5113495bSYour Name 
1252*5113495bSYour Name 	/* update the newly allocated skb in case IPA is enabled */
1253*5113495bSYour Name 	if (nbuf_head != head) {
1254*5113495bSYour Name 		head = nbuf_head;
1255*5113495bSYour Name 		rx_desc->nbuf = head;
1256*5113495bSYour Name 		rx_reorder_array_elem->head = head;
1257*5113495bSYour Name 	}
1258*5113495bSYour Name 
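	/* Reserve the next entry in the REO reinject (SW2REO) entrance ring;
	 * this descriptor will carry the reassembled MPDU back to REO.
	 */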
1259*5113495bSYour Name 	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1260*5113495bSYour Name 	if (!ent_ring_desc) {
1261*5113495bSYour Name 		dp_err_rl("HAL src ring next entry NULL");
1262*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1263*5113495bSYour Name 	}
1264*5113495bSYour Name 
1265*5113495bSYour Name 	hal_rx_reo_buf_paddr_get(soc->hal_soc, dst_ring_desc, &buf_info);
1266*5113495bSYour Name 
1267*5113495bSYour Name 	/* buffer_addr_info is the first element of ring_desc */
1268*5113495bSYour Name 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)dst_ring_desc,
1269*5113495bSYour Name 				  &buf_info);
1270*5113495bSYour Name 
1271*5113495bSYour Name 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1272*5113495bSYour Name 
1273*5113495bSYour Name 	qdf_assert_always(link_desc_va);
1274*5113495bSYour Name 
1275*5113495bSYour Name 	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
1276*5113495bSYour Name 	nbuf_len = qdf_nbuf_len(head) - soc->rx_pkt_tlv_size;
1277*5113495bSYour Name 
1278*5113495bSYour Name 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
1279*5113495bSYour Name 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
1280*5113495bSYour Name 			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);
1281*5113495bSYour Name 
1282*5113495bSYour Name 	/* msdu reconfig */
1283*5113495bSYour Name 	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);
1284*5113495bSYour Name 
1285*5113495bSYour Name 	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);
1286*5113495bSYour Name 
1287*5113495bSYour Name 	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));
1288*5113495bSYour Name 
1289*5113495bSYour Name 	hal_msdu_desc_info_set(soc->hal_soc, msdu_desc_info, dst_ind, nbuf_len);
1290*5113495bSYour Name 
1291*5113495bSYour Name 	/* update the RX TLVs */
1292*5113495bSYour Name 	hal_rx_tlv_msdu_len_set(soc->hal_soc, qdf_nbuf_data(head), nbuf_len);
1293*5113495bSYour Name 
1294*5113495bSYour Name 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)msdu0,
1295*5113495bSYour Name 				  &temp_buf_info);
1296*5113495bSYour Name 
1297*5113495bSYour Name 	cookie = temp_buf_info.sw_cookie;
1298*5113495bSYour Name 	rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
1299*5113495bSYour Name 
1300*5113495bSYour Name 	/* map the nbuf before reinjecting it into HW */
1301*5113495bSYour Name 	ret = qdf_nbuf_map_nbytes_single(soc->osdev, head,
1302*5113495bSYour Name 					 QDF_DMA_FROM_DEVICE,
1303*5113495bSYour Name 					 rx_desc_pool->buf_size);
1304*5113495bSYour Name 	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1305*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1306*5113495bSYour Name 				"%s: nbuf map failed !", __func__);
1307*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1308*5113495bSYour Name 	}
1309*5113495bSYour Name 
1310*5113495bSYour Name 	dp_ipa_handle_rx_buf_smmu_mapping(soc, head,
1311*5113495bSYour Name 					  rx_desc_pool->buf_size, true,
1312*5113495bSYour Name 					  __func__, __LINE__);
1313*5113495bSYour Name 	dp_audio_smmu_map(soc->osdev,
1314*5113495bSYour Name 			  qdf_mem_paddr_from_dmaaddr(soc->osdev,
1315*5113495bSYour Name 						     QDF_NBUF_CB_PADDR(head)),
1316*5113495bSYour Name 			  QDF_NBUF_CB_PADDR(head), rx_desc_pool->buf_size);
1317*5113495bSYour Name 
1318*5113495bSYour Name 	/*
1319*5113495bSYour Name 	 * The buffer was unmapped in the rx frag handler and the rx desc
1320*5113495bSYour Name 	 * 'unmapped' flag was set to 1. The frame is now re-mapped for
1321*5113495bSYour Name 	 * defrag reinjection, so reset the flag back to 0.
1322*5113495bSYour Name 	 */
1323*5113495bSYour Name 	rx_desc->unmapped = 0;
1324*5113495bSYour Name 
1325*5113495bSYour Name 	paddr = qdf_nbuf_get_frag_paddr(head, 0);
1326*5113495bSYour Name 
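	/* dp_check_paddr() verifies the DMA address is within a range the HW
	 * can access and may substitute the buffer if it is not.
	 */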
1327*5113495bSYour Name 	ret = dp_check_paddr(soc, &head, &paddr, rx_desc_pool);
1328*5113495bSYour Name 
1329*5113495bSYour Name 	if (ret == QDF_STATUS_E_FAILURE) {
1330*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1331*5113495bSYour Name 				"%s: x86 check failed !", __func__);
1332*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1333*5113495bSYour Name 	}
1334*5113495bSYour Name 
1335*5113495bSYour Name 	hal_rxdma_buff_addr_info_set(soc->hal_soc, msdu0, paddr, cookie,
1336*5113495bSYour Name 				     rx_defrag_rbm_id);
1337*5113495bSYour Name 
1338*5113495bSYour Name 	/* Let's fill the entrance ring now */
1339*5113495bSYour Name 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1340*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1341*5113495bSYour Name 		"HAL RING Access For REO entrance SRNG Failed: %pK",
1342*5113495bSYour Name 		hal_srng);
1343*5113495bSYour Name 
1344*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1345*5113495bSYour Name 	}
1346*5113495bSYour Name 
1347*5113495bSYour Name 	dp_rx_reinject_ring_record_entry(soc, paddr, cookie,
1348*5113495bSYour Name 					 rx_defrag_rbm_id);
1349*5113495bSYour Name 	paddr = (uint64_t)buf_info.paddr;
1350*5113495bSYour Name 	/* buf addr */
1351*5113495bSYour Name 	hal_rxdma_buff_addr_info_set(soc->hal_soc, ent_ring_desc, paddr,
1352*5113495bSYour Name 				     buf_info.sw_cookie,
1353*5113495bSYour Name 				     soc->idle_link_bm_id);
1354*5113495bSYour Name 	/* mpdu desc info */
1355*5113495bSYour Name 	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
1356*5113495bSYour Name 						    ent_ring_desc);
1357*5113495bSYour Name 	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
1358*5113495bSYour Name 						    dst_ring_desc);
1359*5113495bSYour Name 
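	/* Copy the MPDU desc info from the original REO destination ring
	 * descriptor, clear its first dword, then let hal_mpdu_desc_info_set()
	 * repopulate it with the reassembled MPDU's sequence number.
	 */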
1360*5113495bSYour Name 	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
1361*5113495bSYour Name 				sizeof(struct rx_mpdu_desc_info));
1362*5113495bSYour Name 	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));
1363*5113495bSYour Name 
1364*5113495bSYour Name 	seq_no = hal_rx_get_rx_sequence(soc->hal_soc, rx_desc->rx_buf_start);
1365*5113495bSYour Name 
1366*5113495bSYour Name 	hal_mpdu_desc_info_set(soc->hal_soc, ent_ring_desc, ent_mpdu_desc_info,
1367*5113495bSYour Name 			       seq_no);
1368*5113495bSYour Name 	/* qdesc addr */
1369*5113495bSYour Name 	ent_qdesc_addr = hal_get_reo_ent_desc_qdesc_addr(soc->hal_soc,
1370*5113495bSYour Name 						(uint8_t *)ent_ring_desc);
1371*5113495bSYour Name 
1372*5113495bSYour Name 	dst_qdesc_addr = soc->arch_ops.get_reo_qdesc_addr(
1373*5113495bSYour Name 						soc->hal_soc,
1374*5113495bSYour Name 						(uint8_t *)dst_ring_desc,
1375*5113495bSYour Name 						qdf_nbuf_data(head),
1376*5113495bSYour Name 						txrx_peer, tid);
1377*5113495bSYour Name 
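	/* Only the low 5 bytes (40 bits) of the REO queue descriptor address
	 * are carried in the entrance ring descriptor.
	 */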
1378*5113495bSYour Name 	qdf_mem_copy(ent_qdesc_addr, &dst_qdesc_addr, 5);
1379*5113495bSYour Name 
1380*5113495bSYour Name 	hal_set_reo_ent_desc_reo_dest_ind(soc->hal_soc,
1381*5113495bSYour Name 					  (uint8_t *)ent_ring_desc, dst_ind);
1382*5113495bSYour Name 
1383*5113495bSYour Name 	hal_srng_access_end(soc->hal_soc, hal_srng);
1384*5113495bSYour Name 
1385*5113495bSYour Name 	DP_STATS_INC(soc, rx.reo_reinject, 1);
1386*5113495bSYour Name 	dp_debug("reinjection done !");
1387*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1388*5113495bSYour Name }
1389*5113495bSYour Name #endif
1390*5113495bSYour Name 
1391*5113495bSYour Name /**
1392*5113495bSYour Name  * dp_rx_defrag_gcmp_demic() - Remove MIC information from GCMP fragment
1393*5113495bSYour Name  * @soc: Datapath soc structure
1394*5113495bSYour Name  * @nbuf: Pointer to the fragment buffer
1395*5113495bSYour Name  * @hdrlen: 802.11 header length
1396*5113495bSYour Name  *
1397*5113495bSYour Name  * Remove MIC information from GCMP fragment
1398*5113495bSYour Name  *
1399*5113495bSYour Name  * Return: QDF_STATUS
1400*5113495bSYour Name  */
1401*5113495bSYour Name static QDF_STATUS dp_rx_defrag_gcmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf,
1402*5113495bSYour Name 					  uint16_t hdrlen)
1403*5113495bSYour Name {
1404*5113495bSYour Name 	uint8_t *ivp, *orig_hdr;
1405*5113495bSYour Name 	int rx_desc_len = soc->rx_pkt_tlv_size;
1406*5113495bSYour Name 
1407*5113495bSYour Name 	/* start of the 802.11 header */
1408*5113495bSYour Name 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
1409*5113495bSYour Name 
1410*5113495bSYour Name 	/*
1411*5113495bSYour Name 	 * GCMP header is located after 802.11 header and EXTIV
1412*5113495bSYour Name 	 * field should always be set to 1 for GCMP protocol.
1413*5113495bSYour Name 	 */
1414*5113495bSYour Name 	ivp = orig_hdr + hdrlen;
1415*5113495bSYour Name 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
1416*5113495bSYour Name 		return QDF_STATUS_E_DEFRAG_ERROR;
1417*5113495bSYour Name 
1418*5113495bSYour Name 	qdf_nbuf_trim_tail(nbuf, dp_f_gcmp.ic_trailer);
1419*5113495bSYour Name 
1420*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1421*5113495bSYour Name }
1422*5113495bSYour Name 
1423*5113495bSYour Name QDF_STATUS dp_rx_defrag(struct dp_txrx_peer *txrx_peer, unsigned int tid,
1424*5113495bSYour Name 			qdf_nbuf_t frag_list_head,
1425*5113495bSYour Name 			qdf_nbuf_t frag_list_tail)
1426*5113495bSYour Name {
1427*5113495bSYour Name 	qdf_nbuf_t tmp_next;
1428*5113495bSYour Name 	qdf_nbuf_t cur = frag_list_head, msdu;
1429*5113495bSYour Name 	uint32_t index, tkip_demic = 0;
1430*5113495bSYour Name 	uint16_t hdr_space;
1431*5113495bSYour Name 	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
1432*5113495bSYour Name 	struct dp_vdev *vdev = txrx_peer->vdev;
1433*5113495bSYour Name 	struct dp_soc *soc = vdev->pdev->soc;
1434*5113495bSYour Name 	uint8_t status = 0;
1435*5113495bSYour Name 
1436*5113495bSYour Name 	if (!cur)
1437*5113495bSYour Name 		return QDF_STATUS_E_DEFRAG_ERROR;
1438*5113495bSYour Name 
1439*5113495bSYour Name 	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
1440*5113495bSYour Name 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, cur) ?
1441*5113495bSYour Name 		dp_sec_mcast : dp_sec_ucast;
1442*5113495bSYour Name 
1443*5113495bSYour Name 	/* Remove FCS from all fragments */
1444*5113495bSYour Name 	while (cur) {
1445*5113495bSYour Name 		tmp_next = qdf_nbuf_next(cur);
1446*5113495bSYour Name 		qdf_nbuf_set_next(cur, NULL);
1447*5113495bSYour Name 		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
1448*5113495bSYour Name 		qdf_nbuf_set_next(cur, tmp_next);
1449*5113495bSYour Name 		cur = tmp_next;
1450*5113495bSYour Name 	}
1451*5113495bSYour Name 	cur = frag_list_head;
1452*5113495bSYour Name 
1453*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1454*5113495bSYour Name 		  "%s: index %d Security type: %d", __func__,
1455*5113495bSYour Name 		  index, txrx_peer->security[index].sec_type);
1456*5113495bSYour Name 
1457*5113495bSYour Name 	switch (txrx_peer->security[index].sec_type) {
1458*5113495bSYour Name 	case cdp_sec_type_tkip:
1459*5113495bSYour Name 		tkip_demic = 1;
1460*5113495bSYour Name 		fallthrough;
1461*5113495bSYour Name 	case cdp_sec_type_tkip_nomic:
1462*5113495bSYour Name 		while (cur) {
1463*5113495bSYour Name 			tmp_next = qdf_nbuf_next(cur);
1464*5113495bSYour Name 			if (dp_rx_defrag_tkip_decap(soc, cur, hdr_space)) {
1465*5113495bSYour Name 
1466*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1467*5113495bSYour Name 					QDF_TRACE_LEVEL_ERROR,
1468*5113495bSYour Name 					"dp_rx_defrag: TKIP decap failed");
1469*5113495bSYour Name 
1470*5113495bSYour Name 				return QDF_STATUS_E_DEFRAG_ERROR;
1471*5113495bSYour Name 			}
1472*5113495bSYour Name 			cur = tmp_next;
1473*5113495bSYour Name 		}
1474*5113495bSYour Name 
1475*5113495bSYour Name 		/* If success, increment header to be stripped later */
1476*5113495bSYour Name 		hdr_space += dp_f_tkip.ic_header;
1477*5113495bSYour Name 		break;
1478*5113495bSYour Name 
1479*5113495bSYour Name 	case cdp_sec_type_aes_ccmp:
1480*5113495bSYour Name 		while (cur) {
1481*5113495bSYour Name 			tmp_next = qdf_nbuf_next(cur);
1482*5113495bSYour Name 			if (dp_rx_defrag_ccmp_demic(soc, cur, hdr_space)) {
1483*5113495bSYour Name 
1484*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1485*5113495bSYour Name 					QDF_TRACE_LEVEL_ERROR,
1486*5113495bSYour Name 					"dp_rx_defrag: CCMP demic failed");
1487*5113495bSYour Name 
1488*5113495bSYour Name 				return QDF_STATUS_E_DEFRAG_ERROR;
1489*5113495bSYour Name 			}
1490*5113495bSYour Name 			if (dp_rx_defrag_ccmp_decap(soc, cur, hdr_space)) {
1491*5113495bSYour Name 
1492*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1493*5113495bSYour Name 					QDF_TRACE_LEVEL_ERROR,
1494*5113495bSYour Name 					"dp_rx_defrag: CCMP decap failed");
1495*5113495bSYour Name 
1496*5113495bSYour Name 				return QDF_STATUS_E_DEFRAG_ERROR;
1497*5113495bSYour Name 			}
1498*5113495bSYour Name 			cur = tmp_next;
1499*5113495bSYour Name 		}
1500*5113495bSYour Name 
1501*5113495bSYour Name 		/* If success, increment header to be stripped later */
1502*5113495bSYour Name 		hdr_space += dp_f_ccmp.ic_header;
1503*5113495bSYour Name 		break;
1504*5113495bSYour Name 
1505*5113495bSYour Name 	case cdp_sec_type_wep40:
1506*5113495bSYour Name 	case cdp_sec_type_wep104:
1507*5113495bSYour Name 	case cdp_sec_type_wep128:
1508*5113495bSYour Name 		while (cur) {
1509*5113495bSYour Name 			tmp_next = qdf_nbuf_next(cur);
1510*5113495bSYour Name 			if (dp_rx_defrag_wep_decap(soc, cur, hdr_space)) {
1511*5113495bSYour Name 
1512*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1513*5113495bSYour Name 					QDF_TRACE_LEVEL_ERROR,
1514*5113495bSYour Name 					"dp_rx_defrag: WEP decap failed");
1515*5113495bSYour Name 
1516*5113495bSYour Name 				return QDF_STATUS_E_DEFRAG_ERROR;
1517*5113495bSYour Name 			}
1518*5113495bSYour Name 			cur = tmp_next;
1519*5113495bSYour Name 		}
1520*5113495bSYour Name 
1521*5113495bSYour Name 		/* If success, increment header to be stripped later */
1522*5113495bSYour Name 		hdr_space += dp_f_wep.ic_header;
1523*5113495bSYour Name 		break;
1524*5113495bSYour Name 	case cdp_sec_type_aes_gcmp:
1525*5113495bSYour Name 	case cdp_sec_type_aes_gcmp_256:
1526*5113495bSYour Name 		while (cur) {
1527*5113495bSYour Name 			tmp_next = qdf_nbuf_next(cur);
1528*5113495bSYour Name 			if (dp_rx_defrag_gcmp_demic(soc, cur, hdr_space)) {
1529*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1530*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1531*5113495bSYour Name 					  "dp_rx_defrag: GCMP demic failed");
1532*5113495bSYour Name 
1533*5113495bSYour Name 				return QDF_STATUS_E_DEFRAG_ERROR;
1534*5113495bSYour Name 			}
1535*5113495bSYour Name 			cur = tmp_next;
1536*5113495bSYour Name 		}
1537*5113495bSYour Name 
1538*5113495bSYour Name 		hdr_space += dp_f_gcmp.ic_header;
1539*5113495bSYour Name 		break;
1540*5113495bSYour Name 	default:
1541*5113495bSYour Name 		break;
1542*5113495bSYour Name 	}
1543*5113495bSYour Name 
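	/* The TKIP Michael MIC covers the entire reassembled MSDU, so it is
	 * verified once here, after all fragments have been decapped.
	 */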
1544*5113495bSYour Name 	if (tkip_demic) {
1545*5113495bSYour Name 		msdu = frag_list_head;
1546*5113495bSYour Name 		qdf_mem_copy(key,
1547*5113495bSYour Name 			     &txrx_peer->security[index].michael_key[0],
1548*5113495bSYour Name 			     IEEE80211_WEP_MICLEN);
1549*5113495bSYour Name 		status = dp_rx_defrag_tkip_demic(soc, key, msdu,
1550*5113495bSYour Name 						 soc->rx_pkt_tlv_size +
1551*5113495bSYour Name 						 hdr_space);
1552*5113495bSYour Name 
1553*5113495bSYour Name 		if (status) {
1554*5113495bSYour Name 			dp_rx_defrag_err(vdev, frag_list_head);
1555*5113495bSYour Name 
1556*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1557*5113495bSYour Name 				  QDF_TRACE_LEVEL_ERROR,
1558*5113495bSYour Name 				  "%s: TKIP demic failed status %d",
1559*5113495bSYour Name 				   __func__, status);
1560*5113495bSYour Name 
1561*5113495bSYour Name 			return QDF_STATUS_E_DEFRAG_ERROR;
1562*5113495bSYour Name 		}
1563*5113495bSYour Name 	}
1564*5113495bSYour Name 
1565*5113495bSYour Name 	/* Convert the header to 802.3 header */
1566*5113495bSYour Name 	dp_rx_defrag_nwifi_to_8023(soc, txrx_peer, tid, frag_list_head,
1567*5113495bSYour Name 				   hdr_space);
1568*5113495bSYour Name 	if (qdf_nbuf_next(frag_list_head)) {
1569*5113495bSYour Name 		if (dp_rx_construct_fraglist(txrx_peer, tid, frag_list_head,
1570*5113495bSYour Name 					     hdr_space))
1571*5113495bSYour Name 			return QDF_STATUS_E_DEFRAG_ERROR;
1572*5113495bSYour Name 	}
1573*5113495bSYour Name 
1574*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1575*5113495bSYour Name }
1576*5113495bSYour Name 
1577*5113495bSYour Name void dp_rx_defrag_cleanup(struct dp_txrx_peer *txrx_peer, unsigned int tid)
1578*5113495bSYour Name {
1579*5113495bSYour Name 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1580*5113495bSYour Name 				txrx_peer->rx_tid[tid].array;
1581*5113495bSYour Name 
1582*5113495bSYour Name 	if (rx_reorder_array_elem) {
1583*5113495bSYour Name 		/* Free up nbufs */
1584*5113495bSYour Name 		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
1585*5113495bSYour Name 		rx_reorder_array_elem->head = NULL;
1586*5113495bSYour Name 		rx_reorder_array_elem->tail = NULL;
1587*5113495bSYour Name 	} else {
1588*5113495bSYour Name 		dp_info("Cleanup self peer %pK and TID %u",
1589*5113495bSYour Name 			txrx_peer, tid);
1590*5113495bSYour Name 	}
1591*5113495bSYour Name 
1592*5113495bSYour Name 	/* Free up saved ring descriptors */
1593*5113495bSYour Name 	dp_rx_clear_saved_desc_info(txrx_peer, tid);
1594*5113495bSYour Name 
1595*5113495bSYour Name 	txrx_peer->rx_tid[tid].defrag_timeout_ms = 0;
1596*5113495bSYour Name 	txrx_peer->rx_tid[tid].curr_frag_num = 0;
1597*5113495bSYour Name 	txrx_peer->rx_tid[tid].curr_seq_num = 0;
1598*5113495bSYour Name }
1599*5113495bSYour Name 
1600*5113495bSYour Name #ifdef DP_RX_DEFRAG_ADDR1_CHECK_WAR
1601*5113495bSYour Name #ifdef WLAN_FEATURE_11BE_MLO
1602*5113495bSYour Name /**
1603*5113495bSYour Name  * dp_rx_defrag_vdev_mac_addr_cmp() - function to check whether mac address
1604*5113495bSYour Name  *				matches VDEV mac
1605*5113495bSYour Name  * @vdev: dp_vdev object of the VDEV on which this data packet is received
1606*5113495bSYour Name  * @mac_addr: Address to compare
1607*5113495bSYour Name  *
1608*5113495bSYour Name  * Return: 1 if the MAC address matches the VDEV (or MLD) MAC address,
1609*5113495bSYour Name  *         0 if this frame is not correctly destined to this VDEV/MLD
1610*5113495bSYour Name  */
1611*5113495bSYour Name static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1612*5113495bSYour Name 					  uint8_t *mac_addr)
1613*5113495bSYour Name {
1614*5113495bSYour Name 	return ((qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1615*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE) == 0) ||
1616*5113495bSYour Name 		(qdf_mem_cmp(mac_addr, &vdev->mld_mac_addr.raw[0],
1617*5113495bSYour Name 			     QDF_MAC_ADDR_SIZE) == 0));
1618*5113495bSYour Name }
1619*5113495bSYour Name 
1620*5113495bSYour Name #else
1621*5113495bSYour Name static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1622*5113495bSYour Name 					  uint8_t *mac_addr)
1623*5113495bSYour Name {
1624*5113495bSYour Name 	return (qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1625*5113495bSYour Name 			    QDF_MAC_ADDR_SIZE) == 0);
1626*5113495bSYour Name }
1627*5113495bSYour Name #endif
1628*5113495bSYour Name 
1629*5113495bSYour Name static bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1630*5113495bSYour Name 				     struct dp_vdev *vdev,
1631*5113495bSYour Name 				     uint8_t *rx_tlv_hdr)
1632*5113495bSYour Name {
1633*5113495bSYour Name 	union dp_align_mac_addr mac_addr;
1634*5113495bSYour Name 
1635*5113495bSYour Name 	/* If address1 is not valid discard the fragment */
1636*5113495bSYour Name 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, rx_tlv_hdr,
1637*5113495bSYour Name 				  &mac_addr.raw[0]) != QDF_STATUS_SUCCESS) {
1638*5113495bSYour Name 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1639*5113495bSYour Name 		return false;
1640*5113495bSYour Name 	}
1641*5113495bSYour Name 
1642*5113495bSYour Name 	/* WAR suggested by the HW team to avoid crashing in case of a
1643*5113495bSYour Name 	 * packet corruption issue:
1644*5113495bSYour Name 	 *
1645*5113495bSYour Name 	 * compare the VDEV MAC or MLD MAC address with ADDR1; on a
1646*5113495bSYour Name 	 * mismatch, treat the packet as corrupted and do not
1647*5113495bSYour Name 	 * process it further.
1648*5113495bSYour Name 	 */
1649*5113495bSYour Name 	if (!dp_rx_defrag_vdev_mac_addr_cmp(vdev,
1650*5113495bSYour Name 					    &mac_addr.raw[0])) {
1651*5113495bSYour Name 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1652*5113495bSYour Name 		return false;
1653*5113495bSYour Name 	}
1654*5113495bSYour Name 
1655*5113495bSYour Name 	return true;
1656*5113495bSYour Name }
1657*5113495bSYour Name #else
1658*5113495bSYour Name static inline bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1659*5113495bSYour Name 					    struct dp_vdev *vdev,
1660*5113495bSYour Name 					    uint8_t *rx_tlv_hdr)
1661*5113495bSYour Name {
1662*5113495bSYour Name 
1663*5113495bSYour Name 	return true;
1664*5113495bSYour Name }
1665*5113495bSYour Name #endif
1666*5113495bSYour Name 
1667*5113495bSYour Name QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
1668*5113495bSYour Name 				      struct dp_txrx_peer *txrx_peer,
1669*5113495bSYour Name 				      uint16_t tid,
1670*5113495bSYour Name 				      uint16_t rxseq, qdf_nbuf_t nbuf)
1671*5113495bSYour Name {
1672*5113495bSYour Name 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
1673*5113495bSYour Name 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1674*5113495bSYour Name 	uint8_t all_frag_present;
1675*5113495bSYour Name 	uint32_t msdu_len;
1676*5113495bSYour Name 	QDF_STATUS status;
1677*5113495bSYour Name 
1678*5113495bSYour Name 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1679*5113495bSYour Name 
1680*5113495bSYour Name 	/*
1681*5113495bSYour Name 	 * HW may fill in an unexpected peer_id in the RX PKT TLV.
1682*5113495bSYour Name 	 * If the peer for that peer_id happens to be valid but never went
1683*5113495bSYour Name 	 * through dp_peer_rx_init() (e.g. a SAP vdev self peer), accessing
1684*5113495bSYour Name 	 * rx_reorder_array_elem would be invalid, so drop the frame.
1685*5113495bSYour Name 	 */
1686*5113495bSYour Name 	if (!rx_reorder_array_elem) {
1687*5113495bSYour Name 		dp_verbose_debug(
1688*5113495bSYour Name 			"peer id:%d drop rx frame!",
1689*5113495bSYour Name 			txrx_peer->peer_id);
1690*5113495bSYour Name 		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
1691*5113495bSYour Name 		dp_rx_nbuf_free(nbuf);
1692*5113495bSYour Name 		goto fail;
1693*5113495bSYour Name 	}
1694*5113495bSYour Name 
1695*5113495bSYour Name 	if (rx_reorder_array_elem->head &&
1696*5113495bSYour Name 	    rxseq != rx_tid->curr_seq_num) {
1697*5113495bSYour Name 		/* Drop stored fragments if out of sequence
1698*5113495bSYour Name 		 * fragment is received
1699*5113495bSYour Name 		 */
1700*5113495bSYour Name 		dp_rx_reorder_flush_frag(txrx_peer, tid);
1701*5113495bSYour Name 
1702*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1703*5113495bSYour Name 			  "%s: No list found for TID %d Seq# %d",
1704*5113495bSYour Name 				__func__, tid, rxseq);
1705*5113495bSYour Name 		dp_rx_nbuf_free(nbuf);
1706*5113495bSYour Name 		goto fail;
1707*5113495bSYour Name 	}
1708*5113495bSYour Name 
1709*5113495bSYour Name 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1710*5113495bSYour Name 						  qdf_nbuf_data(nbuf));
1711*5113495bSYour Name 
1712*5113495bSYour Name 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + soc->rx_pkt_tlv_size));
1713*5113495bSYour Name 
1714*5113495bSYour Name 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
1715*5113495bSYour Name 					      &rx_reorder_array_elem->head,
1716*5113495bSYour Name 			&rx_reorder_array_elem->tail, nbuf,
1717*5113495bSYour Name 			&all_frag_present);
1718*5113495bSYour Name 
1719*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
1720*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1721*5113495bSYour Name 			  "%s Fragment insert failed", __func__);
1722*5113495bSYour Name 
1723*5113495bSYour Name 		goto fail;
1724*5113495bSYour Name 	}
1725*5113495bSYour Name 
1726*5113495bSYour Name 	if (soc->rx.flags.defrag_timeout_check)
1727*5113495bSYour Name 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
1728*5113495bSYour Name 
1729*5113495bSYour Name 	if (!all_frag_present) {
1730*5113495bSYour Name 		uint32_t now_ms =
1731*5113495bSYour Name 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1732*5113495bSYour Name 
1733*5113495bSYour Name 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
1734*5113495bSYour Name 			now_ms + soc->rx.defrag.timeout_ms;
1735*5113495bSYour Name 
1736*5113495bSYour Name 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
1737*5113495bSYour Name 
1738*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
1739*5113495bSYour Name 	}
1740*5113495bSYour Name 
1741*5113495bSYour Name 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
1742*5113495bSYour Name 			      rx_reorder_array_elem->tail);
1743*5113495bSYour Name 
1744*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
1745*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1746*5113495bSYour Name 			  "%s Fragment processing failed", __func__);
1747*5113495bSYour Name 
1748*5113495bSYour Name 		dp_rx_return_head_frag_desc(txrx_peer, tid);
1749*5113495bSYour Name 		dp_rx_defrag_cleanup(txrx_peer, tid);
1750*5113495bSYour Name 
1751*5113495bSYour Name 		goto fail;
1752*5113495bSYour Name 	}
1753*5113495bSYour Name 
1754*5113495bSYour Name 	/* Re-inject the fragments back to REO for further processing */
1755*5113495bSYour Name 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
1756*5113495bSYour Name 					   rx_reorder_array_elem->head);
1757*5113495bSYour Name 	if (QDF_IS_STATUS_SUCCESS(status)) {
1758*5113495bSYour Name 		rx_reorder_array_elem->head = NULL;
1759*5113495bSYour Name 		rx_reorder_array_elem->tail = NULL;
1760*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1761*5113495bSYour Name 			  "%s: Frag seq successfully reinjected",
1762*5113495bSYour Name 			__func__);
1763*5113495bSYour Name 	} else {
1764*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1765*5113495bSYour Name 			  "%s: Frag seq reinjection failed", __func__);
1766*5113495bSYour Name 		dp_rx_return_head_frag_desc(txrx_peer, tid);
1767*5113495bSYour Name 	}
1768*5113495bSYour Name 
1769*5113495bSYour Name 	dp_rx_defrag_cleanup(txrx_peer, tid);
1770*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1771*5113495bSYour Name 
1772*5113495bSYour Name fail:
1773*5113495bSYour Name 	return QDF_STATUS_E_DEFRAG_ERROR;
1774*5113495bSYour Name }
1775*5113495bSYour Name 
1776*5113495bSYour Name #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
1777*5113495bSYour Name /**
1778*5113495bSYour Name  * dp_rx_defrag_save_info_from_ring_desc() - Save info from REO ring descriptor
1779*5113495bSYour Name  * @soc: Pointer to the SOC data structure
1780*5113495bSYour Name  * @ring_desc: Pointer to the dst ring descriptor
1781*5113495bSYour Name  * @rx_desc: Pointer to rx descriptor
1782*5113495bSYour Name  * @txrx_peer: Pointer to the peer
1783*5113495bSYour Name  * @tid: Traffic Identifier
1784*5113495bSYour Name  *
1785*5113495bSYour Name  * Return: None
1786*5113495bSYour Name  */
1787*5113495bSYour Name static QDF_STATUS
1788*5113495bSYour Name dp_rx_defrag_save_info_from_ring_desc(struct dp_soc *soc,
1789*5113495bSYour Name 				      hal_ring_desc_t ring_desc,
1790*5113495bSYour Name 				      struct dp_rx_desc *rx_desc,
1791*5113495bSYour Name 				      struct dp_txrx_peer *txrx_peer,
1792*5113495bSYour Name 				      unsigned int tid)
1793*5113495bSYour Name {
1794*5113495bSYour Name 	void *dst_ring_desc;
1795*5113495bSYour Name 
1796*5113495bSYour Name 	dst_ring_desc = qdf_mem_malloc(hal_srng_get_entrysize(soc->hal_soc,
1797*5113495bSYour Name 							      REO_DST));
1798*5113495bSYour Name 
1799*5113495bSYour Name 	if (!dst_ring_desc) {
1800*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1801*5113495bSYour Name 			"%s: Memory alloc failed !", __func__);
1802*5113495bSYour Name 		QDF_ASSERT(0);
1803*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
1804*5113495bSYour Name 	}
1805*5113495bSYour Name 
1806*5113495bSYour Name 	qdf_mem_copy(dst_ring_desc, ring_desc,
1807*5113495bSYour Name 		     hal_srng_get_entrysize(soc->hal_soc, REO_DST));
1808*5113495bSYour Name 
1809*5113495bSYour Name 	txrx_peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
1810*5113495bSYour Name 	txrx_peer->rx_tid[tid].head_frag_desc = rx_desc;
1811*5113495bSYour Name 
1812*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1813*5113495bSYour Name }
1814*5113495bSYour Name 
1815*5113495bSYour Name /**
1816*5113495bSYour Name  * dp_rx_defrag_store_fragment() - Store incoming fragments
1817*5113495bSYour Name  * @soc: Pointer to the SOC data structure
1818*5113495bSYour Name  * @ring_desc: Pointer to the ring descriptor
1819*5113495bSYour Name  * @head: Pointer to the head of the rx descriptor free list
1820*5113495bSYour Name  * @tail: Pointer to the tail of the rx descriptor free list
1821*5113495bSYour Name  * @mpdu_desc_info: MPDU descriptor info
1822*5113495bSYour Name  * @tid: Traffic Identifier
1823*5113495bSYour Name  * @rx_desc: Pointer to rx descriptor
1824*5113495bSYour Name  * @rx_bfs: Number of rx buffers consumed
1825*5113495bSYour Name  *
1826*5113495bSYour Name  * Return: QDF_STATUS
1827*5113495bSYour Name  */
1828*5113495bSYour Name static QDF_STATUS
1829*5113495bSYour Name dp_rx_defrag_store_fragment(struct dp_soc *soc,
1830*5113495bSYour Name 			    hal_ring_desc_t ring_desc,
1831*5113495bSYour Name 			    union dp_rx_desc_list_elem_t **head,
1832*5113495bSYour Name 			    union dp_rx_desc_list_elem_t **tail,
1833*5113495bSYour Name 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1834*5113495bSYour Name 			    unsigned int tid, struct dp_rx_desc *rx_desc,
1835*5113495bSYour Name 			    uint32_t *rx_bfs)
1836*5113495bSYour Name {
1837*5113495bSYour Name 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1838*5113495bSYour Name 	struct dp_pdev *pdev;
1839*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = NULL;
1840*5113495bSYour Name 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1841*5113495bSYour Name 	uint16_t peer_id;
1842*5113495bSYour Name 	uint8_t fragno, more_frag, all_frag_present = 0;
1843*5113495bSYour Name 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1844*5113495bSYour Name 	QDF_STATUS status;
1845*5113495bSYour Name 	struct dp_rx_tid_defrag *rx_tid;
1846*5113495bSYour Name 	uint8_t mpdu_sequence_control_valid;
1847*5113495bSYour Name 	uint8_t mpdu_frame_control_valid;
1848*5113495bSYour Name 	qdf_nbuf_t frag = rx_desc->nbuf;
1849*5113495bSYour Name 	uint32_t msdu_len;
1850*5113495bSYour Name 
1851*5113495bSYour Name 	if (qdf_nbuf_len(frag) > 0) {
1852*5113495bSYour Name 		dp_info("Dropping unexpected packet with skb_len: %d "
1853*5113495bSYour Name 			"data len: %d cookie: %d",
1854*5113495bSYour Name 			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
1855*5113495bSYour Name 			rx_desc->cookie);
1856*5113495bSYour Name 		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
1857*5113495bSYour Name 		goto discard_frag;
1858*5113495bSYour Name 	}
1859*5113495bSYour Name 
1860*5113495bSYour Name 	if (dp_rx_buffer_pool_refill(soc, frag, rx_desc->pool_id)) {
1861*5113495bSYour Name 		/* fragment queued back to the pool, free the link desc */
1862*5113495bSYour Name 		goto err_free_desc;
1863*5113495bSYour Name 	}
1864*5113495bSYour Name 
1865*5113495bSYour Name 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1866*5113495bSYour Name 						  rx_desc->rx_buf_start);
1867*5113495bSYour Name 
1868*5113495bSYour Name 	qdf_nbuf_set_pktlen(frag, (msdu_len + soc->rx_pkt_tlv_size));
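	/* Reset any stale frag/ext list reference left on this nbuf */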
1869*5113495bSYour Name 	qdf_nbuf_append_ext_list(frag, NULL, 0);
1870*5113495bSYour Name 
1871*5113495bSYour Name 	/* Check if the packet is from a valid peer */
1872*5113495bSYour Name 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1873*5113495bSYour Name 					       mpdu_desc_info->peer_meta_data);
1874*5113495bSYour Name 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle,
1875*5113495bSYour Name 					       DP_MOD_ID_RX_ERR);
1876*5113495bSYour Name 
1877*5113495bSYour Name 	if (!txrx_peer) {
1878*5113495bSYour Name 		/* We should not receive anything from unknown peer
1879*5113495bSYour Name 		 * however, that might happen while we are in the monitor mode.
1880*5113495bSYour Name 		 * We don't need to handle that here
1881*5113495bSYour Name 		 */
1882*5113495bSYour Name 		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
1883*5113495bSYour Name 			   peer_id);
1884*5113495bSYour Name 		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
1885*5113495bSYour Name 		goto discard_frag;
1886*5113495bSYour Name 	}
1887*5113495bSYour Name 
1888*5113495bSYour Name 	if (tid >= DP_MAX_TIDS) {
1889*5113495bSYour Name 		dp_info("TID out of bounds: %d", tid);
1890*5113495bSYour Name 		qdf_assert_always(0);
1891*5113495bSYour Name 		goto discard_frag;
1892*5113495bSYour Name 	}
1893*5113495bSYour Name 
1894*5113495bSYour Name 	if (!dp_rx_defrag_addr1_check(soc, txrx_peer->vdev,
1895*5113495bSYour Name 				      rx_desc->rx_buf_start)) {
1896*5113495bSYour Name 		dp_info("Invalid address 1");
1897*5113495bSYour Name 		goto discard_frag;
1898*5113495bSYour Name 	}
1899*5113495bSYour Name 
1900*5113495bSYour Name 	mpdu_sequence_control_valid =
1901*5113495bSYour Name 		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
1902*5113495bSYour Name 						       rx_desc->rx_buf_start);
1903*5113495bSYour Name 
1904*5113495bSYour Name 	/* Invalid MPDU sequence control field, MPDU is of no use */
1905*5113495bSYour Name 	if (!mpdu_sequence_control_valid) {
1906*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1907*5113495bSYour Name 			"Invalid MPDU seq control field, dropping MPDU");
1908*5113495bSYour Name 
1909*5113495bSYour Name 		qdf_assert(0);
1910*5113495bSYour Name 		goto discard_frag;
1911*5113495bSYour Name 	}
1912*5113495bSYour Name 
1913*5113495bSYour Name 	mpdu_frame_control_valid =
1914*5113495bSYour Name 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1915*5113495bSYour Name 						    rx_desc->rx_buf_start);
1916*5113495bSYour Name 
1917*5113495bSYour Name 	/* Invalid frame control field */
1918*5113495bSYour Name 	if (!mpdu_frame_control_valid) {
1919*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1920*5113495bSYour Name 			"Invalid frame control field, dropping MPDU");
1921*5113495bSYour Name 
1922*5113495bSYour Name 		qdf_assert(0);
1923*5113495bSYour Name 		goto discard_frag;
1924*5113495bSYour Name 	}
1925*5113495bSYour Name 
1926*5113495bSYour Name 	/* Current mpdu sequence */
1927*5113495bSYour Name 	more_frag = dp_rx_frag_get_more_frag_bit(soc, rx_desc->rx_buf_start);
1928*5113495bSYour Name 
1929*5113495bSYour Name 	/* HW does not populate the fragment number as of now;
1930*5113495bSYour Name 	 * it needs to be fetched from the 802.11 header
1931*5113495bSYour Name 	 */
1932*5113495bSYour Name 	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc->rx_buf_start);
1933*5113495bSYour Name 
1934*5113495bSYour Name 	pdev = txrx_peer->vdev->pdev;
1935*5113495bSYour Name 	rx_tid = &txrx_peer->rx_tid[tid];
1936*5113495bSYour Name 
1937*5113495bSYour Name 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, frag,
1938*5113495bSYour Name 			      QDF_TX_RX_STATUS_OK, false);
1939*5113495bSYour Name 
1940*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->defrag_tid_lock);
1941*5113495bSYour Name 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1942*5113495bSYour Name 	if (!rx_reorder_array_elem) {
1943*5113495bSYour Name 		dp_err_rl("Rcvd Fragmented pkt before tid setup for peer %pK",
1944*5113495bSYour Name 			  txrx_peer);
1945*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1946*5113495bSYour Name 		goto discard_frag;
1947*5113495bSYour Name 	}
1948*5113495bSYour Name 
1949*5113495bSYour Name 	/*
1950*5113495bSYour Name 	 * !more_frag: no more fragments to be delivered
1951*5113495bSYour Name 	 * !fragno: packet is not fragmented
1952*5113495bSYour Name 	 * !rx_reorder_array_elem->head: no saved fragments so far
1953*5113495bSYour Name 	 */
1954*5113495bSYour Name 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
1955*5113495bSYour Name 		/* We should not get into this situation here.
1956*5113495bSYour Name 		 * It means an unfragmented packet with fragment flag
1957*5113495bSYour Name 		 * is delivered over the REO exception ring.
1958*5113495bSYour Name 		 * Typically it follows normal rx path.
1959*5113495bSYour Name 		 */
1960*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1961*5113495bSYour Name 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1962*5113495bSYour Name 
1963*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1964*5113495bSYour Name 		qdf_assert(0);
1965*5113495bSYour Name 		goto discard_frag;
1966*5113495bSYour Name 	}
1967*5113495bSYour Name 
1968*5113495bSYour Name 	/* Check if the fragment is for the same sequence or a different one */
1969*5113495bSYour Name 	dp_debug("rx_tid %d", tid);
1970*5113495bSYour Name 	if (rx_reorder_array_elem->head) {
1971*5113495bSYour Name 		dp_debug("rxseq %d", rxseq);
1972*5113495bSYour Name 		if (rxseq != rx_tid->curr_seq_num) {
1973*5113495bSYour Name 
1974*5113495bSYour Name 			dp_debug("mismatch cur_seq %d rxseq %d",
1975*5113495bSYour Name 				 rx_tid->curr_seq_num, rxseq);
1976*5113495bSYour Name 			/* Drop stored fragments if out of sequence
1977*5113495bSYour Name 			 * fragment is received
1978*5113495bSYour Name 			 */
1979*5113495bSYour Name 			dp_rx_reorder_flush_frag(txrx_peer, tid);
1980*5113495bSYour Name 
1981*5113495bSYour Name 			DP_STATS_INC(soc, rx.rx_frag_oor, 1);
1982*5113495bSYour Name 
1983*5113495bSYour Name 			dp_debug("cur rxseq %d", rxseq);
1984*5113495bSYour Name 			/*
1985*5113495bSYour Name 			 * The sequence number for this fragment becomes the
1986*5113495bSYour Name 			 * new sequence number to be processed
1987*5113495bSYour Name 			 */
1988*5113495bSYour Name 			rx_tid->curr_seq_num = rxseq;
1989*5113495bSYour Name 		}
1990*5113495bSYour Name 	} else {
1991*5113495bSYour Name 		/* Check if we are processing the first fragment; if it is
1992*5113495bSYour Name 		 * not the first fragment, discard it.
1993*5113495bSYour Name 		 */
1994*5113495bSYour Name 		if (fragno) {
1995*5113495bSYour Name 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1996*5113495bSYour Name 			goto discard_frag;
1997*5113495bSYour Name 		}
1998*5113495bSYour Name 		dp_debug("cur rxseq %d", rxseq);
1999*5113495bSYour Name 		/* Start of a new sequence */
2000*5113495bSYour Name 		dp_rx_defrag_cleanup(txrx_peer, tid);
2001*5113495bSYour Name 		rx_tid->curr_seq_num = rxseq;
2002*5113495bSYour Name 		/* store PN number also */
2003*5113495bSYour Name 	}
2004*5113495bSYour Name 
2005*5113495bSYour Name 	/*
2006*5113495bSYour Name 	 * If the earlier sequence was dropped, this will be the fresh start.
2007*5113495bSYour Name 	 * Else, continue with next fragment in a given sequence
2008*5113495bSYour Name 	 */
2009*5113495bSYour Name 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
2010*5113495bSYour Name 					      &rx_reorder_array_elem->head,
2011*5113495bSYour Name 					      &rx_reorder_array_elem->tail,
2012*5113495bSYour Name 					      frag, &all_frag_present);
2013*5113495bSYour Name 
2014*5113495bSYour Name 	/*
2015*5113495bSYour Name 	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
2016*5113495bSYour Name 	 * packet sequence has more than 6 MSDUs for some reason, we will
2017*5113495bSYour Name 	 * have to use the next MSDU link descriptor and chain them together
2018*5113495bSYour Name 	 * before reinjection.
2019*5113495bSYour Name 	 * ring_desc is validated in dp_rx_err_process.
2020*5113495bSYour Name 	 */
2021*5113495bSYour Name 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
2022*5113495bSYour Name 			(rx_reorder_array_elem->head == frag)) {
2023*5113495bSYour Name 
2024*5113495bSYour Name 		status = dp_rx_defrag_save_info_from_ring_desc(soc, ring_desc,
2025*5113495bSYour Name 							       rx_desc,
2026*5113495bSYour Name 							       txrx_peer, tid);
2027*5113495bSYour Name 
2028*5113495bSYour Name 		if (status != QDF_STATUS_SUCCESS) {
2029*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2030*5113495bSYour Name 				"%s: Unable to store ring desc !", __func__);
2031*5113495bSYour Name 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2032*5113495bSYour Name 			goto discard_frag;
2033*5113495bSYour Name 		}
2034*5113495bSYour Name 	} else {
2035*5113495bSYour Name 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
2036*5113495bSYour Name 		(*rx_bfs)++;
2037*5113495bSYour Name 
2038*5113495bSYour Name 		/* Return the non-head link desc */
2039*5113495bSYour Name 		if (dp_rx_link_desc_return(soc, ring_desc,
2040*5113495bSYour Name 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2041*5113495bSYour Name 		    QDF_STATUS_SUCCESS)
2042*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2043*5113495bSYour Name 				  "%s: Failed to return link desc", __func__);
2044*5113495bSYour Name 
2045*5113495bSYour Name 	}
2046*5113495bSYour Name 
2047*5113495bSYour Name 	if (pdev->soc->rx.flags.defrag_timeout_check)
2048*5113495bSYour Name 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
2049*5113495bSYour Name 
2050*5113495bSYour Name 	/* Yet to receive more fragments for this sequence number */
2051*5113495bSYour Name 	if (!all_frag_present) {
2052*5113495bSYour Name 		uint32_t now_ms =
2053*5113495bSYour Name 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2054*5113495bSYour Name 
2055*5113495bSYour Name 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
2056*5113495bSYour Name 			now_ms + pdev->soc->rx.defrag.timeout_ms;
2057*5113495bSYour Name 
2058*5113495bSYour Name 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
2059*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2060*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2061*5113495bSYour Name 
2062*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
2063*5113495bSYour Name 	}
2064*5113495bSYour Name 
2065*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2066*5113495bSYour Name 		  "All fragments received for sequence: %d", rxseq);
2067*5113495bSYour Name 
2068*5113495bSYour Name 	/* Process the fragments */
2069*5113495bSYour Name 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
2070*5113495bSYour Name 			      rx_reorder_array_elem->tail);
2071*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(status)) {
2072*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2073*5113495bSYour Name 			"Fragment processing failed");
2074*5113495bSYour Name 
2075*5113495bSYour Name 		dp_rx_add_to_free_desc_list(head, tail,
2076*5113495bSYour Name 				txrx_peer->rx_tid[tid].head_frag_desc);
2077*5113495bSYour Name 		(*rx_bfs)++;
2078*5113495bSYour Name 
2079*5113495bSYour Name 		if (dp_rx_link_desc_return(soc,
2080*5113495bSYour Name 					txrx_peer->rx_tid[tid].dst_ring_desc,
2081*5113495bSYour Name 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2082*5113495bSYour Name 				QDF_STATUS_SUCCESS)
2083*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2084*5113495bSYour Name 					"%s: Failed to return link desc",
2085*5113495bSYour Name 					__func__);
2086*5113495bSYour Name 		dp_rx_defrag_cleanup(txrx_peer, tid);
2087*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2088*5113495bSYour Name 		goto end;
2089*5113495bSYour Name 	}
2090*5113495bSYour Name 
2091*5113495bSYour Name 	/* Re-inject the fragments back to REO for further processing */
2092*5113495bSYour Name 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
2093*5113495bSYour Name 					   rx_reorder_array_elem->head);
2094*5113495bSYour Name 	if (QDF_IS_STATUS_SUCCESS(status)) {
2095*5113495bSYour Name 		rx_reorder_array_elem->head = NULL;
2096*5113495bSYour Name 		rx_reorder_array_elem->tail = NULL;
2097*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2098*5113495bSYour Name 			  "Fragmented sequence successfully reinjected");
2099*5113495bSYour Name 	} else {
2100*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2101*5113495bSYour Name 		"Fragmented sequence reinjection failed");
2102*5113495bSYour Name 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2103*5113495bSYour Name 	}
2104*5113495bSYour Name 
2105*5113495bSYour Name 	dp_rx_defrag_cleanup(txrx_peer, tid);
2106*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2107*5113495bSYour Name 
2108*5113495bSYour Name 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2109*5113495bSYour Name 
2110*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2111*5113495bSYour Name 
2112*5113495bSYour Name discard_frag:
2113*5113495bSYour Name 	dp_rx_nbuf_free(frag);
2114*5113495bSYour Name err_free_desc:
2115*5113495bSYour Name 	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
2116*5113495bSYour Name 	if (dp_rx_link_desc_return(soc, ring_desc,
2117*5113495bSYour Name 				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2118*5113495bSYour Name 	    QDF_STATUS_SUCCESS)
2119*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2120*5113495bSYour Name 			  "%s: Failed to return link desc", __func__);
2121*5113495bSYour Name 	(*rx_bfs)++;
2122*5113495bSYour Name 
2123*5113495bSYour Name end:
2124*5113495bSYour Name 	if (txrx_peer)
2125*5113495bSYour Name 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2126*5113495bSYour Name 
2127*5113495bSYour Name 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
2128*5113495bSYour Name 	return QDF_STATUS_E_DEFRAG_ERROR;
2129*5113495bSYour Name }
2130*5113495bSYour Name 
2131*5113495bSYour Name uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
2132*5113495bSYour Name 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
2133*5113495bSYour Name 			   struct dp_rx_desc *rx_desc,
2134*5113495bSYour Name 			   uint8_t *mac_id,
2135*5113495bSYour Name 			   uint32_t quota)
2136*5113495bSYour Name {
2137*5113495bSYour Name 	uint32_t rx_bufs_used = 0;
2138*5113495bSYour Name 	qdf_nbuf_t msdu = NULL;
2139*5113495bSYour Name 	uint32_t tid;
2140*5113495bSYour Name 	uint32_t rx_bfs = 0;
2141*5113495bSYour Name 	struct dp_pdev *pdev;
2142*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2143*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
2144*5113495bSYour Name 
2145*5113495bSYour Name 	qdf_assert(soc);
2146*5113495bSYour Name 	qdf_assert(mpdu_desc_info);
2147*5113495bSYour Name 	qdf_assert(rx_desc);
2148*5113495bSYour Name 
2149*5113495bSYour Name 	dp_debug("Number of MSDUs to process, num_msdus: %d",
2150*5113495bSYour Name 		 mpdu_desc_info->msdu_count);
2151*5113495bSYour Name 
2152*5113495bSYour Name 
2153*5113495bSYour Name 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
2154*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2155*5113495bSYour Name 			"Not sufficient MSDUs to process");
2156*5113495bSYour Name 		return rx_bufs_used;
2157*5113495bSYour Name 	}
2158*5113495bSYour Name 
2159*5113495bSYour Name 	/* all buffers in MSDU link belong to same pdev */
2160*5113495bSYour Name 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2161*5113495bSYour Name 	if (!pdev) {
2162*5113495bSYour Name 		dp_nofl_debug("pdev is null for pool_id = %d",
2163*5113495bSYour Name 			      rx_desc->pool_id);
2164*5113495bSYour Name 		return rx_bufs_used;
2165*5113495bSYour Name 	}
2166*5113495bSYour Name 
2167*5113495bSYour Name 	*mac_id = rx_desc->pool_id;
2168*5113495bSYour Name 
2169*5113495bSYour Name 	msdu = rx_desc->nbuf;
2170*5113495bSYour Name 
2171*5113495bSYour Name 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2172*5113495bSYour Name 
2173*5113495bSYour Name 	if (rx_desc->unmapped)
2174*5113495bSYour Name 		return rx_bufs_used;
2175*5113495bSYour Name 
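	/* Unmap the fragment from DMA so the CPU can safely parse the RX TLVs
	 * and 802.11 header; it is re-mapped later at reinject time.
	 */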
2176*5113495bSYour Name 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2177*5113495bSYour Name 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2178*5113495bSYour Name 	rx_desc->unmapped = 1;
2179*5113495bSYour Name 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2180*5113495bSYour Name 
2181*5113495bSYour Name 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
2182*5113495bSYour Name 
2183*5113495bSYour Name 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);
2184*5113495bSYour Name 
2185*5113495bSYour Name 	/* Process fragment-by-fragment */
2186*5113495bSYour Name 	status = dp_rx_defrag_store_fragment(soc, ring_desc,
2187*5113495bSYour Name 					     &pdev->free_list_head,
2188*5113495bSYour Name 					     &pdev->free_list_tail,
2189*5113495bSYour Name 					     mpdu_desc_info,
2190*5113495bSYour Name 					     tid, rx_desc, &rx_bfs);
2191*5113495bSYour Name 
2192*5113495bSYour Name 	if (rx_bfs)
2193*5113495bSYour Name 		rx_bufs_used += rx_bfs;
2194*5113495bSYour Name 
2195*5113495bSYour Name 	if (!QDF_IS_STATUS_SUCCESS(status))
2196*5113495bSYour Name 		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
2197*5113495bSYour Name 			   mpdu_desc_info->mpdu_seq,
2198*5113495bSYour Name 			   mpdu_desc_info->msdu_count,
2199*5113495bSYour Name 			   mpdu_desc_info->mpdu_flags);
2200*5113495bSYour Name 
2201*5113495bSYour Name 	return rx_bufs_used;
2202*5113495bSYour Name }
2203*5113495bSYour Name 
2204*5113495bSYour Name #endif /* WLAN_SOFTUMAC_SUPPORT */
2205