xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_defrag.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #ifndef RX_DEFRAG_DO_NOT_REINJECT
22 #ifndef DP_BE_WAR
23 #include "li/hal_li_rx.h"
24 #endif
25 #endif
26 #include "dp_types.h"
27 #include "dp_rx.h"
28 #include "dp_peer.h"
29 #include "hal_api.h"
30 #include "qdf_trace.h"
31 #include "qdf_nbuf.h"
32 #include "dp_internal.h"
33 #include "dp_rx_defrag.h"
34 #include <enet.h>	/* LLC_SNAP_HDR_LEN */
36 #include "dp_ipa.h"
37 #include "dp_rx_buffer_pool.h"
38 
39 const struct dp_rx_defrag_cipher dp_f_ccmp = {
40 	"AES-CCM",
41 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
42 	IEEE80211_WEP_MICLEN,
43 	0,
44 };
45 
46 const struct dp_rx_defrag_cipher dp_f_tkip = {
47 	"TKIP",
48 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
49 	IEEE80211_WEP_CRCLEN,
50 	IEEE80211_WEP_MICLEN,
51 };
52 
53 const struct dp_rx_defrag_cipher dp_f_wep = {
54 	"WEP",
55 	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
56 	IEEE80211_WEP_CRCLEN,
57 	0,
58 };
59 
60 /*
61  * The header and MIC lengths are the same for both
62  * GCMP-128 and GCMP-256.
63  */
64 const struct dp_rx_defrag_cipher dp_f_gcmp = {
65 	"AES-GCMP",
66 	WLAN_IEEE80211_GCMP_HEADERLEN,
67 	WLAN_IEEE80211_GCMP_MICLEN,
68 	WLAN_IEEE80211_GCMP_MICLEN,
69 };
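/*
 * Reader's note on how these descriptors are consumed below: the fields
 * are, in order, the cipher name, ic_header (IV/key-ID bytes following
 * the 802.11 header), ic_trailer (ICV/MIC bytes trimmed from the tail)
 * and ic_miclen (Michael MIC length, TKIP only). Assuming the standard
 * sizes (3-byte WEP IV, 1-byte key ID, 4-byte extended IV), dp_f_tkip
 * works out to ic_header = 8, ic_trailer = 4 (ICV) and ic_miclen = 8
 * (Michael MIC).
 */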
70 
71 /**
72  * dp_rx_defrag_frames_free() - Free fragment chain
73  * @frames: Fragment chain
74  *
75  * Iterates through the fragment chain and frees each fragment
76  * Return: None
77  */
78 static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
79 {
80 	qdf_nbuf_t next, frag = frames;
81 
82 	while (frag) {
83 		next = qdf_nbuf_next(frag);
84 		dp_rx_nbuf_free(frag);
85 		frag = next;
86 	}
87 }
88 
89 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
90 /**
91  * dp_rx_clear_saved_desc_info() - Clears descriptor info
92  * @txrx_peer: Pointer to the peer data structure
93  * @tid: Traffic Identifier (TID)
94  *
95  * Frees the cached MPDU descriptor info and clears the saved
96  * MSDU link pointer. The cache is maintained per peer, per TID.
97  *
98  * Return: None
99  */
100 static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
101 					unsigned int tid)
102 {
103 	if (txrx_peer->rx_tid[tid].dst_ring_desc)
104 		qdf_mem_free(txrx_peer->rx_tid[tid].dst_ring_desc);
105 
106 	txrx_peer->rx_tid[tid].dst_ring_desc = NULL;
107 	txrx_peer->rx_tid[tid].head_frag_desc = NULL;
108 }
109 
110 static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
111 					unsigned int tid)
112 {
113 	struct dp_soc *soc;
114 	struct dp_pdev *pdev;
115 	struct dp_srng *dp_rxdma_srng;
116 	struct rx_desc_pool *rx_desc_pool;
117 	union dp_rx_desc_list_elem_t *head = NULL;
118 	union dp_rx_desc_list_elem_t *tail = NULL;
119 	uint8_t pool_id;
120 
121 	pdev = txrx_peer->vdev->pdev;
122 	soc = pdev->soc;
123 
124 	if (txrx_peer->rx_tid[tid].head_frag_desc) {
125 		pool_id = txrx_peer->rx_tid[tid].head_frag_desc->pool_id;
126 		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
127 		rx_desc_pool = &soc->rx_desc_buf[pool_id];
128 
129 		dp_rx_add_to_free_desc_list(&head, &tail,
130 					    txrx_peer->rx_tid[tid].head_frag_desc);
131 		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
132 					1, &head, &tail, false);
133 	}
134 
135 	if (txrx_peer->rx_tid[tid].dst_ring_desc) {
136 		if (dp_rx_link_desc_return(soc,
137 					   txrx_peer->rx_tid[tid].dst_ring_desc,
138 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
139 		    QDF_STATUS_SUCCESS)
140 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
141 				  "%s: Failed to return link desc", __func__);
142 	}
143 }
144 #else
145 
146 static void dp_rx_clear_saved_desc_info(struct dp_txrx_peer *txrx_peer,
147 					unsigned int tid)
148 {
149 }
150 
151 static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
152 					unsigned int tid)
153 {
154 }
155 #endif /* WLAN_SOFTUMAC_SUPPORT */
156 
157 void dp_rx_reorder_flush_frag(struct dp_txrx_peer *txrx_peer,
158 			      unsigned int tid)
159 {
160 	dp_info_rl("Flushing TID %d", tid);
161 
162 	if (!txrx_peer) {
163 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
164 					"%s: NULL peer", __func__);
165 		return;
166 	}
167 
168 	dp_rx_return_head_frag_desc(txrx_peer, tid);
169 	dp_rx_defrag_cleanup(txrx_peer, tid);
170 }
171 
172 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
173 {
174 	struct dp_rx_tid_defrag *waitlist_elem = NULL;
175 	struct dp_rx_tid_defrag *tmp;
176 	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
177 	TAILQ_HEAD(, dp_rx_tid_defrag) temp_list;
178 	dp_txrx_ref_handle txrx_ref_handle = NULL;
179 
180 	TAILQ_INIT(&temp_list);
181 
182 	dp_debug("Current time %u", now_ms);
183 
184 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
185 	TAILQ_FOREACH_SAFE(waitlist_elem, &soc->rx.defrag.waitlist,
186 			   defrag_waitlist_elem, tmp) {
187 		uint32_t tid;
188 
189 		if (waitlist_elem->defrag_timeout_ms > now_ms)
190 			break;
191 
192 		tid = waitlist_elem->tid;
193 		if (tid >= DP_MAX_TIDS) {
194 			qdf_assert(0);
195 			continue;
196 		}
197 
198 		TAILQ_REMOVE(&soc->rx.defrag.waitlist, waitlist_elem,
199 			     defrag_waitlist_elem);
200 		DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
201 
202 		/* Move to temp list and clean-up later */
203 		TAILQ_INSERT_TAIL(&temp_list, waitlist_elem,
204 				  defrag_waitlist_elem);
205 	}
206 	if (waitlist_elem) {
207 		soc->rx.defrag.next_flush_ms =
208 			waitlist_elem->defrag_timeout_ms;
209 	} else {
210 		soc->rx.defrag.next_flush_ms =
211 			now_ms + soc->rx.defrag.timeout_ms;
212 	}
213 
214 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
215 
216 	TAILQ_FOREACH_SAFE(waitlist_elem, &temp_list,
217 			   defrag_waitlist_elem, tmp) {
218 		struct dp_txrx_peer *txrx_peer, *temp_peer = NULL;
219 
220 		qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
221 		TAILQ_REMOVE(&temp_list, waitlist_elem,
222 			     defrag_waitlist_elem);
223 		/* get address of current peer */
224 		txrx_peer = waitlist_elem->defrag_peer;
225 		qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
226 
227 		temp_peer = dp_txrx_peer_get_ref_by_id(soc, txrx_peer->peer_id,
228 						       &txrx_ref_handle,
229 						       DP_MOD_ID_RX_ERR);
230 		if (temp_peer == txrx_peer) {
231 			qdf_spin_lock_bh(&waitlist_elem->defrag_tid_lock);
232 			dp_rx_reorder_flush_frag(txrx_peer, waitlist_elem->tid);
233 			qdf_spin_unlock_bh(&waitlist_elem->defrag_tid_lock);
234 		}
235 
236 		if (temp_peer)
237 			dp_txrx_peer_unref_delete(txrx_ref_handle,
238 						  DP_MOD_ID_RX_ERR);
239 
240 	}
241 }
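/*
 * Note: the flush above is deliberately two-phase. Expired entries are
 * moved to a temporary list while holding the global defrag_lock, then
 * flushed outside that lock while holding the per-TID lock and a peer
 * reference, so dp_rx_reorder_flush_frag() never runs under
 * rx.defrag.defrag_lock.
 */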
242 
243 void dp_rx_defrag_waitlist_add(struct dp_txrx_peer *txrx_peer,
244 			       unsigned int tid)
245 {
246 	struct dp_soc *psoc = txrx_peer->vdev->pdev->soc;
247 	struct dp_rx_tid_defrag *waitlist_elem = &txrx_peer->rx_tid[tid];
248 
249 	dp_debug("Adding TID %u to waitlist for peer %pK with peer_id = %d ",
250 		 tid, txrx_peer, txrx_peer->peer_id);
251 
252 	/* TODO: use LIST macros instead of TAIL macros */
253 	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
254 	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
255 		psoc->rx.defrag.next_flush_ms =
256 			waitlist_elem->defrag_timeout_ms;
257 
258 	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, waitlist_elem,
259 			  defrag_waitlist_elem);
260 	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
261 	qdf_spin_unlock_bh(&psoc->rx.defrag.defrag_lock);
262 }
263 
264 void dp_rx_defrag_waitlist_remove(struct dp_txrx_peer *txrx_peer,
265 				  unsigned int tid)
266 {
267 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
268 	struct dp_soc *soc = pdev->soc;
269 	struct dp_rx_tid_defrag *waitlist_elm;
270 	struct dp_rx_tid_defrag *tmp;
271 
272 	dp_debug("Removing TID %u from waitlist for peer %pK peer_id = %d",
273 		 tid, txrx_peer, txrx_peer->peer_id);
274 
275 	if (tid >= DP_MAX_TIDS) {
276 		dp_err("TID out of bounds: %d", tid);
277 		qdf_assert_always(0);
278 	}
279 
280 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
281 	TAILQ_FOREACH_SAFE(waitlist_elm, &soc->rx.defrag.waitlist,
282 			   defrag_waitlist_elem, tmp) {
283 		struct dp_txrx_peer *peer_on_waitlist;
284 
285 		/* get address of current peer */
286 		peer_on_waitlist = waitlist_elm->defrag_peer;
287 
288 		/* Ensure it is TID for same peer */
289 		if (peer_on_waitlist == txrx_peer && waitlist_elm->tid == tid) {
290 			TAILQ_REMOVE(&soc->rx.defrag.waitlist,
291 				     waitlist_elm, defrag_waitlist_elem);
292 			DP_STATS_DEC(soc, rx.rx_frag_wait, 1);
293 		}
294 	}
295 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
296 }
297 
298 QDF_STATUS
299 dp_rx_defrag_fraglist_insert(struct dp_txrx_peer *txrx_peer, unsigned int tid,
300 			     qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr,
301 			     qdf_nbuf_t frag, uint8_t *all_frag_present)
302 {
303 	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
304 	qdf_nbuf_t next;
305 	qdf_nbuf_t prev = NULL;
306 	qdf_nbuf_t cur;
307 	uint16_t head_fragno, cur_fragno, next_fragno;
308 	uint8_t last_morefrag = 1, count = 0;
309 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
310 	uint8_t *rx_desc_info;
311 
312 	qdf_assert(frag);
313 	qdf_assert(head_addr);
314 	qdf_assert(tail_addr);
315 
316 	*all_frag_present = 0;
317 	rx_desc_info = qdf_nbuf_data(frag);
318 	cur_fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc_info);
319 
320 	dp_debug("cur_fragno %d", cur_fragno);
321 	/* If this is the first fragment */
322 	if (!(*head_addr)) {
323 		*head_addr = *tail_addr = frag;
324 		qdf_nbuf_set_next(*tail_addr, NULL);
325 		rx_tid->curr_frag_num = cur_fragno;
326 
327 		goto insert_done;
328 	}
329 
330 	/* In sequence fragment */
331 	if (cur_fragno > rx_tid->curr_frag_num) {
332 		qdf_nbuf_set_next(*tail_addr, frag);
333 		*tail_addr = frag;
334 		qdf_nbuf_set_next(*tail_addr, NULL);
335 		rx_tid->curr_frag_num = cur_fragno;
336 	} else {
337 		/* Out of sequence fragment */
338 		cur = *head_addr;
339 		rx_desc_info = qdf_nbuf_data(cur);
340 		head_fragno = dp_rx_frag_get_mpdu_frag_number(soc,
341 							      rx_desc_info);
342 
343 		if (cur_fragno == head_fragno) {
344 			dp_rx_nbuf_free(frag);
345 			goto insert_fail;
346 		} else if (head_fragno > cur_fragno) {
347 			qdf_nbuf_set_next(frag, cur);
348 			cur = frag;
349 			*head_addr = frag; /* head pointer to be updated */
350 		} else {
351 			while ((cur_fragno > head_fragno) && cur) {
352 				prev = cur;
353 				cur = qdf_nbuf_next(cur);
354 				if (cur) {
355 					rx_desc_info = qdf_nbuf_data(cur);
356 					head_fragno =
357 						dp_rx_frag_get_mpdu_frag_number(
358 								soc,
359 								rx_desc_info);
360 				}
361 			}
362 
363 			if (cur_fragno == head_fragno) {
364 				dp_rx_nbuf_free(frag);
365 				goto insert_fail;
366 			}
367 
368 			qdf_nbuf_set_next(prev, frag);
369 			qdf_nbuf_set_next(frag, cur);
370 		}
371 	}
372 
373 	next = qdf_nbuf_next(*head_addr);
374 
375 	rx_desc_info = qdf_nbuf_data(*tail_addr);
376 	last_morefrag = dp_rx_frag_get_more_frag_bit(soc, rx_desc_info);
377 
378 	/* TODO: optimize the loop */
379 	if (!last_morefrag) {
380 		/* Check if all fragments are present */
381 		do {
382 			rx_desc_info = qdf_nbuf_data(next);
383 			next_fragno =
384 				dp_rx_frag_get_mpdu_frag_number(soc,
385 								rx_desc_info);
386 			count++;
387 
388 			if (next_fragno != count)
389 				break;
390 
391 			next = qdf_nbuf_next(next);
392 		} while (next);
393 
394 		if (!next) {
395 			*all_frag_present = 1;
396 			return QDF_STATUS_SUCCESS;
397 		} else {
398 			/* revisit */
399 		}
400 	}
401 
402 insert_done:
403 	return QDF_STATUS_SUCCESS;
404 
405 insert_fail:
406 	return QDF_STATUS_E_FAILURE;
407 }
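/*
 * Worked example (hypothetical arrival order 0, 2, 1 for one sequence):
 * fragment 0 starts the list, fragment 2 is appended via the in-sequence
 * branch (curr_frag_num becomes 2), and fragment 1 takes the
 * out-of-sequence path, where the walk stops at fragment 2 and fragment
 * 1 is linked in between. Once the tail fragment has the more-frag bit
 * clear, the completeness loop verifies that the fragments after the
 * head are numbered 1, 2, ... consecutively and sets *all_frag_present.
 */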
408 
409 
410 /**
411  * dp_rx_defrag_tkip_decap() - decap tkip encrypted fragment
412  * @soc: DP SOC
413  * @msdu: Pointer to the fragment
414  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
415  *
416  * decap tkip encrypted fragment
417  *
418  * Return: QDF_STATUS
419  */
420 static QDF_STATUS
421 dp_rx_defrag_tkip_decap(struct dp_soc *soc,
422 			qdf_nbuf_t msdu, uint16_t hdrlen)
423 {
424 	uint8_t *ivp, *orig_hdr;
425 	int rx_desc_len = soc->rx_pkt_tlv_size;
426 
427 	/* start of 802.11 header info */
428 	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
429 
430 	/* TKIP header is located post 802.11 header */
431 	ivp = orig_hdr + hdrlen;
432 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
433 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
434 			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
435 		return QDF_STATUS_E_DEFRAG_ERROR;
436 	}
437 
438 	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);
439 
440 	return QDF_STATUS_SUCCESS;
441 }
442 
443 /**
444  * dp_rx_defrag_ccmp_demic() - Remove MIC information from CCMP fragment
445  * @soc: DP SOC
446  * @nbuf: Pointer to the fragment buffer
447  * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
448  *
449  * Remove MIC information from CCMP fragment
450  *
451  * Return: QDF_STATUS
452  */
453 static QDF_STATUS
454 dp_rx_defrag_ccmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
455 {
456 	uint8_t *ivp, *orig_hdr;
457 	int rx_desc_len = soc->rx_pkt_tlv_size;
458 
459 	/* start of the 802.11 header */
460 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
461 
462 	/* CCMP header is located after 802.11 header */
463 	ivp = orig_hdr + hdrlen;
464 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
465 		return QDF_STATUS_E_DEFRAG_ERROR;
466 
467 	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);
468 
469 	return QDF_STATUS_SUCCESS;
470 }
471 
472 /**
473  * dp_rx_defrag_ccmp_decap() - decap CCMP encrypted fragment
474  * @soc: DP SOC
475  * @nbuf: Pointer to the fragment
476  * @hdrlen: length of the header information
477  *
478  * decap CCMP encrypted fragment
479  *
480  * Return: QDF_STATUS
481  */
482 static QDF_STATUS
483 dp_rx_defrag_ccmp_decap(struct dp_soc *soc, qdf_nbuf_t nbuf, uint16_t hdrlen)
484 {
485 	uint8_t *ivp, *origHdr;
486 	int rx_desc_len = soc->rx_pkt_tlv_size;
487 
488 	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
489 	ivp = origHdr + hdrlen;
490 
491 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
492 		return QDF_STATUS_E_DEFRAG_ERROR;
493 
494 	return QDF_STATUS_SUCCESS;
495 }
496 
497 /**
498  * dp_rx_defrag_wep_decap() - decap WEP encrypted fragment
499  * @soc: DP SOC
500  * @msdu: Pointer to the fragment
501  * @hdrlen: length of the header information
502  *
503  * decap WEP encrypted fragment
504  *
505  * Return: QDF_STATUS
506  */
507 static QDF_STATUS
508 dp_rx_defrag_wep_decap(struct dp_soc *soc, qdf_nbuf_t msdu, uint16_t hdrlen)
509 {
510 	uint8_t *origHdr;
511 	int rx_desc_len = soc->rx_pkt_tlv_size;
512 
513 	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
514 	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);
515 
516 	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);
517 
518 	return QDF_STATUS_SUCCESS;
519 }
520 
521 /**
522  * dp_rx_defrag_hdrsize() - Calculate the header size of the received fragment
523  * @soc: soc handle
524  * @nbuf: Pointer to the fragment
525  *
526  * Calculate the header size of the received fragment
527  *
528  * Return: header size (uint16_t)
529  */
530 static uint16_t dp_rx_defrag_hdrsize(struct dp_soc *soc, qdf_nbuf_t nbuf)
531 {
532 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
533 	uint16_t size = sizeof(struct ieee80211_frame);
534 	uint16_t fc = 0;
535 	uint32_t to_ds, fr_ds;
536 	uint8_t frm_ctrl_valid;
537 	uint16_t frm_ctrl_field;
538 
539 	to_ds = hal_rx_mpdu_get_to_ds(soc->hal_soc, rx_tlv_hdr);
540 	fr_ds = hal_rx_mpdu_get_fr_ds(soc->hal_soc, rx_tlv_hdr);
541 	frm_ctrl_valid =
542 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
543 						    rx_tlv_hdr);
544 	frm_ctrl_field = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_tlv_hdr);
545 
546 	if (to_ds && fr_ds)
547 		size += QDF_MAC_ADDR_SIZE;
548 
549 	if (frm_ctrl_valid) {
550 		fc = frm_ctrl_field;
551 
552 		/* use 1-st byte for validation */
553 		/* use the first byte for validation */
554 			size += sizeof(uint16_t);
555 			/* use the second byte for validation */
556 			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
557 				size += sizeof(struct ieee80211_htc);
558 		}
559 	}
560 
561 	return size;
562 }
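/*
 * Worked example of the arithmetic above, assuming the standard 802.11
 * sizes: sizeof(struct ieee80211_frame) = 24. A 4-address
 * (to_ds && fr_ds) QoS data frame with the order bit set would give
 * 24 + 6 (addr4) + 2 (QoS control) + 4 (HT control) = 36 bytes.
 */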
563 
564 /**
565  * dp_rx_defrag_michdr() - Calculate a pseudo MIC header
566  * @wh0: Pointer to the wireless header of the fragment
567  * @hdr: Array to hold the pseudo header
568  *
569  * Calculate a pseudo MIC header
570  *
571  * Return: None
572  */
573 static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
574 				uint8_t hdr[])
575 {
576 	const struct ieee80211_frame_addr4 *wh =
577 		(const struct ieee80211_frame_addr4 *)wh0;
578 
579 	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
580 	case IEEE80211_FC1_DIR_NODS:
581 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
582 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
583 					   wh->i_addr2);
584 		break;
585 	case IEEE80211_FC1_DIR_TODS:
586 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
587 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
588 					   wh->i_addr2);
589 		break;
590 	case IEEE80211_FC1_DIR_FROMDS:
591 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
592 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
593 					   wh->i_addr3);
594 		break;
595 	case IEEE80211_FC1_DIR_DSTODS:
596 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
597 		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + QDF_MAC_ADDR_SIZE,
598 					   wh->i_addr4);
599 		break;
600 	}
601 
602 	/*
603 	 * Bit 7 is QDF_IEEE80211_FC0_SUBTYPE_QOS for a data frame, but
604 	 * it can also be set for deauth, disassoc, action, etc. in a
605 	 * management-type frame. This matters for MFP.
606 	 */
607 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
608 		if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
609 				IEEE80211_FC1_DIR_DSTODS) {
610 			const struct ieee80211_qosframe_addr4 *qwh =
611 				(const struct ieee80211_qosframe_addr4 *)wh;
612 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
613 		} else {
614 			const struct ieee80211_qosframe *qwh =
615 				(const struct ieee80211_qosframe *)wh;
616 			hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
617 		}
618 	} else {
619 		hdr[12] = 0;
620 	}
621 
622 	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
623 }
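/*
 * Resulting 16-byte Michael pseudo-header built above:
 *   hdr[0..5]   DA (selected per the to/from-DS bits)
 *   hdr[6..11]  SA
 *   hdr[12]     QoS TID (0 for non-QoS frames)
 *   hdr[13..15] reserved, zero
 * This is the TKIP-defined Michael input (DA, SA, priority, three zero
 * bytes) that is mixed in before the MSDU data.
 */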
624 
625 /**
626  * dp_rx_defrag_mic() - Calculate the Michael MIC
627  * @soc: DP SOC
628  * @key: Pointer to the key
629  * @wbuf: fragment buffer
630  * @off: Offset
631  * @data_len: Data length
632  * @mic: Array to hold MIC
633  *
634  * Calculate the Michael MIC over the fragment chain payload
635  *
636  * Return: QDF_STATUS
637  */
638 static QDF_STATUS dp_rx_defrag_mic(struct dp_soc *soc, const uint8_t *key,
639 				   qdf_nbuf_t wbuf, uint16_t off,
640 				   uint16_t data_len, uint8_t mic[])
641 {
642 	uint8_t hdr[16] = { 0, };
643 	uint32_t l, r;
644 	const uint8_t *data;
645 	uint32_t space;
646 	int rx_desc_len = soc->rx_pkt_tlv_size;
647 
648 	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
649 		+ rx_desc_len), hdr);
650 
651 	l = dp_rx_get_le32(key);
652 	r = dp_rx_get_le32(key + 4);
653 
654 	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
655 	l ^= dp_rx_get_le32(hdr);
656 	dp_rx_michael_block(l, r);
657 	l ^= dp_rx_get_le32(&hdr[4]);
658 	dp_rx_michael_block(l, r);
659 	l ^= dp_rx_get_le32(&hdr[8]);
660 	dp_rx_michael_block(l, r);
661 	l ^= dp_rx_get_le32(&hdr[12]);
662 	dp_rx_michael_block(l, r);
663 
664 	/* first buffer has special handling */
665 	data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
666 	space = qdf_nbuf_len(wbuf) - off;
667 
668 	for (;; ) {
669 		if (space > data_len)
670 			space = data_len;
671 
672 		/* collect 32-bit blocks from current buffer */
673 		while (space >= sizeof(uint32_t)) {
674 			l ^= dp_rx_get_le32(data);
675 			dp_rx_michael_block(l, r);
676 			data += sizeof(uint32_t);
677 			space -= sizeof(uint32_t);
678 			data_len -= sizeof(uint32_t);
679 		}
680 		if (data_len < sizeof(uint32_t))
681 			break;
682 
683 		wbuf = qdf_nbuf_next(wbuf);
684 		if (!wbuf)
685 			return QDF_STATUS_E_DEFRAG_ERROR;
686 
687 		if (space != 0) {
688 			const uint8_t *data_next;
689 			/*
690 			 * Block straddles buffers, split references.
691 			 */
692 			data_next =
693 				(uint8_t *)qdf_nbuf_data(wbuf) + off;
694 			if ((qdf_nbuf_len(wbuf)) <
695 				sizeof(uint32_t) - space) {
696 				return QDF_STATUS_E_DEFRAG_ERROR;
697 			}
698 			switch (space) {
699 			case 1:
700 				l ^= dp_rx_get_le32_split(data[0],
701 					data_next[0], data_next[1],
702 					data_next[2]);
703 				data = data_next + 3;
704 				space = (qdf_nbuf_len(wbuf) - off) - 3;
705 				break;
706 			case 2:
707 				l ^= dp_rx_get_le32_split(data[0], data[1],
708 						    data_next[0], data_next[1]);
709 				data = data_next + 2;
710 				space = (qdf_nbuf_len(wbuf) - off) - 2;
711 				break;
712 			case 3:
713 				l ^= dp_rx_get_le32_split(data[0], data[1],
714 					data[2], data_next[0]);
715 				data = data_next + 1;
716 				space = (qdf_nbuf_len(wbuf) - off) - 1;
717 				break;
718 			}
719 			dp_rx_michael_block(l, r);
720 			data_len -= sizeof(uint32_t);
721 		} else {
722 			/*
723 			 * Setup for next buffer.
724 			 */
725 			data = (uint8_t *)qdf_nbuf_data(wbuf) + off;
726 			space = qdf_nbuf_len(wbuf) - off;
727 		}
728 	}
729 	/* Last block and padding (0x5a, 4..7 x 0) */
730 	switch (data_len) {
731 	case 0:
732 		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
733 		break;
734 	case 1:
735 		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
736 		break;
737 	case 2:
738 		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
739 		break;
740 	case 3:
741 		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
742 		break;
743 	}
744 	dp_rx_michael_block(l, r);
745 	dp_rx_michael_block(l, r);
746 	dp_rx_put_le32(mic, l);
747 	dp_rx_put_le32(mic + 4, r);
748 
749 	return QDF_STATUS_SUCCESS;
750 }
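/*
 * For reference, dp_rx_michael_block() applies the standard Michael
 * block function to the running (l, r) state. A minimal sketch of that
 * transform, shown only as an illustration of what the loop above
 * assumes (rotl32/rotr32 are 32-bit rotates; the marked line is
 * Michael's XSWAP byte swap):
 *
 *   r ^= rotl32(l, 17);
 *   l += r;
 *   r ^= ((l & 0x00ff00ff) << 8) | ((l & 0xff00ff00) >> 8); // XSWAP
 *   l += r;
 *   r ^= rotl32(l, 3);
 *   l += r;
 *   r ^= rotr32(l, 2);
 *   l += r;
 */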
751 
752 /**
753  * dp_rx_defrag_tkip_demic() - Remove MIC header from the TKIP frame
754  * @soc: DP SOC
755  * @key: Pointer to the key
756  * @msdu: fragment buffer
757  * @hdrlen: Length of the header information
758  *
759  * Remove MIC information from the TKIP frame
760  *
761  * Return: QDF_STATUS
762  */
763 static QDF_STATUS dp_rx_defrag_tkip_demic(struct dp_soc *soc,
764 					  const uint8_t *key,
765 					  qdf_nbuf_t msdu, uint16_t hdrlen)
766 {
767 	QDF_STATUS status;
768 	uint32_t pktlen = 0, prev_data_len;
769 	uint8_t mic[IEEE80211_WEP_MICLEN];
770 	uint8_t mic0[IEEE80211_WEP_MICLEN];
771 	qdf_nbuf_t prev = NULL, prev0, next;
772 	uint8_t len0 = 0;
773 
774 	next = msdu;
775 	prev0 = msdu;
776 	while (next) {
777 		pktlen += (qdf_nbuf_len(next) - hdrlen);
778 		prev = next;
779 		dp_debug("pktlen %u",
780 			 (uint32_t)(qdf_nbuf_len(next) - hdrlen));
781 		next = qdf_nbuf_next(next);
782 		if (next && !qdf_nbuf_next(next))
783 			prev0 = prev;
784 	}
785 
786 	if (!prev) {
787 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
788 			  "%s Defrag chaining failed !\n", __func__);
789 		return QDF_STATUS_E_DEFRAG_ERROR;
790 	}
791 
792 	prev_data_len = qdf_nbuf_len(prev) - hdrlen;
793 	if (prev_data_len < dp_f_tkip.ic_miclen) {
794 		if (prev0 == prev) {
795 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
796 				  "%s Fragments don't have MIC header !\n", __func__);
797 			return QDF_STATUS_E_DEFRAG_ERROR;
798 		}
799 		len0 = dp_f_tkip.ic_miclen - (uint8_t)prev_data_len;
800 		qdf_nbuf_copy_bits(prev0, qdf_nbuf_len(prev0) - len0, len0,
801 				   (caddr_t)mic0);
802 		qdf_nbuf_trim_tail(prev0, len0);
803 	}
804 
805 	qdf_nbuf_copy_bits(prev, (qdf_nbuf_len(prev) -
806 			   (dp_f_tkip.ic_miclen - len0)),
807 			   (dp_f_tkip.ic_miclen - len0),
808 			   (caddr_t)(&mic0[len0]));
809 	qdf_nbuf_trim_tail(prev, (dp_f_tkip.ic_miclen - len0));
810 	pktlen -= dp_f_tkip.ic_miclen;
811 
812 	if (((qdf_nbuf_len(prev) - hdrlen) == 0) && prev != msdu) {
813 		dp_rx_nbuf_free(prev);
814 		qdf_nbuf_set_next(prev0, NULL);
815 	}
816 
817 	status = dp_rx_defrag_mic(soc, key, msdu, hdrlen,
818 				  pktlen, mic);
819 
820 	if (QDF_IS_STATUS_ERROR(status))
821 		return status;
822 
823 	if (qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
824 		return QDF_STATUS_E_DEFRAG_ERROR;
825 
826 	return QDF_STATUS_SUCCESS;
827 }
828 
829 /**
830  * dp_rx_frag_pull_hdr() - Pulls the RXTLV & the 802.11 headers
831  * @soc: DP SOC
832  * @nbuf: buffer pointer
833  * @hdrsize: size of the header to be pulled
834  *
835  * Pull the RXTLV & the 802.11 headers
836  *
837  * Return: None
838  */
839 static void dp_rx_frag_pull_hdr(struct dp_soc *soc,
840 				qdf_nbuf_t nbuf, uint16_t hdrsize)
841 {
842 	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));
843 
844 	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + hdrsize);
845 
846 	dp_debug("final pktlen %d .11len %d",
847 		 (uint32_t)qdf_nbuf_len(nbuf), hdrsize);
848 }
849 
850 /**
851  * dp_rx_defrag_pn_check() - Check the current fragment's PN against the previous PN
852  * @soc: DP SOC
853  * @msdu: msdu to get the current PN
854  * @cur_pn128: PN extracted from current msdu
855  * @prev_pn128: Previous PN
856  *
857  * Return: 0 if the PN is in sequence, non-zero otherwise
858  */
859 static int dp_rx_defrag_pn_check(struct dp_soc *soc, qdf_nbuf_t msdu,
860 				 uint64_t *cur_pn128, uint64_t *prev_pn128)
861 {
862 	int out_of_order = 0;
863 
864 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(msdu), cur_pn128);
865 
866 	if (cur_pn128[1] == prev_pn128[1])
867 		out_of_order = (cur_pn128[0] - prev_pn128[0] != 1);
868 	else
869 		out_of_order = (cur_pn128[1] - prev_pn128[1] != 1);
870 
871 	return out_of_order;
872 }
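/*
 * Example of the continuity rule above (hypothetical values): with
 * prev_pn128 = {5, 0}, a fragment carrying PN {6, 0} is in order (the
 * low 64 bits advanced by exactly one), while {8, 0} or any jump in the
 * upper 64 bits sets out_of_order and the caller drops the chain.
 */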
873 
874 /**
875  * dp_rx_construct_fraglist() - Construct a nbuf fraglist
876  * @txrx_peer: Pointer to the txrx peer
877  * @tid: Traffic Identifier (TID)
878  * @head: Pointer to list of fragments
879  * @hdrsize: Size of the header to be pulled
880  *
881  * Construct a nbuf fraglist
882  *
883  * Return: QDF_STATUS
884  */
885 static int
886 dp_rx_construct_fraglist(struct dp_txrx_peer *txrx_peer, int tid,
887 			 qdf_nbuf_t head,
888 			 uint16_t hdrsize)
889 {
890 	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
891 	qdf_nbuf_t msdu = qdf_nbuf_next(head);
892 	qdf_nbuf_t rx_nbuf = msdu;
893 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
894 	uint32_t len = 0;
895 	uint64_t cur_pn128[2] = {0, 0}, prev_pn128[2];
896 	int out_of_order = 0;
897 	int index;
898 	int needs_pn_check = 0;
899 	enum cdp_sec_type sec_type;
900 
901 	prev_pn128[0] = rx_tid->pn128[0];
902 	prev_pn128[1] = rx_tid->pn128[1];
903 
904 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu) ? dp_sec_mcast :
905 				dp_sec_ucast;
906 	sec_type = txrx_peer->security[index].sec_type;
907 
908 	if (!(sec_type == cdp_sec_type_none || sec_type == cdp_sec_type_wep128 ||
909 	      sec_type == cdp_sec_type_wep104 || sec_type == cdp_sec_type_wep40))
910 		needs_pn_check = 1;
911 
912 	while (msdu) {
913 		if (qdf_likely(needs_pn_check))
914 			out_of_order = dp_rx_defrag_pn_check(soc, msdu,
915 							     &cur_pn128[0],
916 							     &prev_pn128[0]);
917 
918 		if (qdf_unlikely(out_of_order)) {
919 			dp_info_rl("cur_pn128[0] 0x%llx cur_pn128[1] 0x%llx prev_pn128[0] 0x%llx prev_pn128[1] 0x%llx",
920 				   cur_pn128[0], cur_pn128[1],
921 				   prev_pn128[0], prev_pn128[1]);
922 			return QDF_STATUS_E_FAILURE;
923 		}
924 
925 		prev_pn128[0] = cur_pn128[0];
926 		prev_pn128[1] = cur_pn128[1];
927 
928 		/*
929 		 * Broadcast and multicast frames should never be fragmented.
930 		 * Iterate through all MSDUs and drop the fragments if even
931 		 * one of them has a mcast/bcast destination address.
932 		 */
933 		if (hal_rx_msdu_is_wlan_mcast(soc->hal_soc, msdu)) {
934 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
935 				  "Dropping multicast/broadcast fragments");
936 			return QDF_STATUS_E_FAILURE;
937 		}
938 
939 		dp_rx_frag_pull_hdr(soc, msdu, hdrsize);
940 		len += qdf_nbuf_len(msdu);
941 		msdu = qdf_nbuf_next(msdu);
942 	}
943 
944 	qdf_nbuf_append_ext_list(head, rx_nbuf, len);
945 	qdf_nbuf_set_next(head, NULL);
946 	qdf_nbuf_set_is_frag(head, 1);
947 
948 	dp_debug("head len %d ext len %d data len %d ",
949 		 (uint32_t)qdf_nbuf_len(head),
950 		 (uint32_t)qdf_nbuf_len(rx_nbuf),
951 		 (uint32_t)(head->data_len));
952 
953 	return QDF_STATUS_SUCCESS;
954 }
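/*
 * After this function the head nbuf keeps the first fragment's data and
 * the remaining fragments hang off it as an ext (frag) list via
 * qdf_nbuf_append_ext_list(); each chained fragment has had its RX TLVs
 * and 802.11 header pulled, so the list concatenates into one MSDU
 * payload contributing 'len' additional bytes.
 */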
955 
956 /**
957  * dp_rx_defrag_err() - rx defragmentation error handler
958  * @vdev: handle to vdev object
959  * @nbuf: packet buffer
960  *
961  * This function handles the rx error and sends a MIC error notification
962  *
963  * Return: None
964  */
965 static void dp_rx_defrag_err(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
966 {
967 	struct ol_if_ops *tops = NULL;
968 	struct dp_pdev *pdev = vdev->pdev;
969 	int rx_desc_len = pdev->soc->rx_pkt_tlv_size;
970 	uint8_t *orig_hdr;
971 	struct ieee80211_frame *wh;
972 	struct cdp_rx_mic_err_info mic_failure_info;
973 
974 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
975 	wh = (struct ieee80211_frame *)orig_hdr;
976 
977 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.da_mac_addr,
978 			 (struct qdf_mac_addr *)&wh->i_addr1);
979 	qdf_copy_macaddr((struct qdf_mac_addr *)&mic_failure_info.ta_mac_addr,
980 			 (struct qdf_mac_addr *)&wh->i_addr2);
981 	mic_failure_info.key_id = 0;
982 	mic_failure_info.multicast =
983 		IEEE80211_IS_MULTICAST(wh->i_addr1);
984 	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
985 	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
986 	mic_failure_info.data = (uint8_t *)wh;
987 	mic_failure_info.vdev_id = vdev->vdev_id;
988 
989 	tops = pdev->soc->cdp_soc.ol_ops;
990 	if (tops->rx_mic_error)
991 		tops->rx_mic_error(pdev->soc->ctrl_psoc, pdev->pdev_id,
992 				   &mic_failure_info);
993 }
994 
995 
996 /**
997  * dp_rx_defrag_nwifi_to_8023() - Transcap 802.11 to 802.3
998  * @soc: dp soc handle
999  * @txrx_peer: txrx_peer handle
1000  * @tid: Traffic Identifier (TID)
1001  * @nbuf: Pointer to the fragment buffer
1002  * @hdrsize: Size of headers
1003  *
1004  * Transcap the fragment from 802.11 to 802.3
1005  *
1006  * Return: None
1007  */
1008 static void
1009 dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
1010 			   int tid, qdf_nbuf_t nbuf, uint16_t hdrsize)
1011 {
1012 	struct llc_snap_hdr_t *llchdr;
1013 	struct ethernet_hdr_t *eth_hdr;
1014 	uint8_t ether_type[2];
1015 	uint16_t fc = 0;
1016 	union dp_align_mac_addr mac_addr;
1017 	uint8_t *rx_desc_info = qdf_mem_malloc(soc->rx_pkt_tlv_size);
1018 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
1019 	struct ieee80211_frame_addr4 wh = {0};
1020 
1021 	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), rx_tid->pn128);
1022 
1023 	hal_rx_print_pn(soc->hal_soc, qdf_nbuf_data(nbuf));
1024 
1025 	if (!rx_desc_info) {
1026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1027 			"%s: Memory alloc failed ! ", __func__);
1028 		QDF_ASSERT(0);
1029 		return;
1030 	}
1031 
1032 	qdf_mem_zero(&wh, sizeof(struct ieee80211_frame_addr4));
1033 	if (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
1034 		qdf_mem_copy(&wh, qdf_nbuf_data(nbuf) + soc->rx_pkt_tlv_size,
1035 			     hdrsize);
1036 
1037 	qdf_mem_copy(rx_desc_info, qdf_nbuf_data(nbuf), soc->rx_pkt_tlv_size);
1038 
1039 	llchdr = (struct llc_snap_hdr_t *)(qdf_nbuf_data(nbuf) +
1040 					soc->rx_pkt_tlv_size + hdrsize);
1041 	qdf_mem_copy(ether_type, llchdr->ethertype, 2);
1042 
1043 	qdf_nbuf_pull_head(nbuf, (soc->rx_pkt_tlv_size + hdrsize +
1044 				  sizeof(struct llc_snap_hdr_t) -
1045 				  sizeof(struct ethernet_hdr_t)));
1046 
1047 	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(nbuf));
1048 
1049 	if (hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1050 						rx_desc_info))
1051 		fc = hal_rx_get_frame_ctrl_field(soc->hal_soc, rx_desc_info);
1052 
1053 	dp_debug("Frame control type: 0x%x", fc);
1054 
1055 	switch (((fc & 0xff00) >> 8) & IEEE80211_FC1_DIR_MASK) {
1056 	case IEEE80211_FC1_DIR_NODS:
1057 		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
1058 				      &mac_addr.raw[0]);
1059 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1060 			QDF_MAC_ADDR_SIZE);
1061 		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
1062 				      &mac_addr.raw[0]);
1063 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1064 			QDF_MAC_ADDR_SIZE);
1065 		break;
1066 	case IEEE80211_FC1_DIR_TODS:
1067 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1068 				      &mac_addr.raw[0]);
1069 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1070 			QDF_MAC_ADDR_SIZE);
1071 		hal_rx_mpdu_get_addr2(soc->hal_soc, rx_desc_info,
1072 				      &mac_addr.raw[0]);
1073 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1074 			QDF_MAC_ADDR_SIZE);
1075 		break;
1076 	case IEEE80211_FC1_DIR_FROMDS:
1077 		hal_rx_mpdu_get_addr1(soc->hal_soc, rx_desc_info,
1078 				      &mac_addr.raw[0]);
1079 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1080 			QDF_MAC_ADDR_SIZE);
1081 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1082 				      &mac_addr.raw[0]);
1083 		qdf_mem_copy(eth_hdr->src_addr, &mac_addr.raw[0],
1084 			QDF_MAC_ADDR_SIZE);
1085 		break;
1086 
1087 	case IEEE80211_FC1_DIR_DSTODS:
1088 		hal_rx_mpdu_get_addr3(soc->hal_soc, rx_desc_info,
1089 				      &mac_addr.raw[0]);
1090 		qdf_mem_copy(eth_hdr->dest_addr, &mac_addr.raw[0],
1091 			QDF_MAC_ADDR_SIZE);
1092 		qdf_mem_copy(eth_hdr->src_addr, &wh.i_addr4[0],
1093 			     QDF_MAC_ADDR_SIZE);
1094 		break;
1095 
1096 	default:
1097 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1098 		"%s: Unknown frame control type: 0x%x", __func__, fc);
1099 	}
1100 
1101 	qdf_mem_copy(eth_hdr->ethertype, ether_type,
1102 			sizeof(ether_type));
1103 
1104 	qdf_nbuf_push_head(nbuf, soc->rx_pkt_tlv_size);
1105 	qdf_mem_copy(qdf_nbuf_data(nbuf), rx_desc_info, soc->rx_pkt_tlv_size);
1106 	qdf_mem_free(rx_desc_info);
1107 }
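/*
 * Byte-level view of the conversion above, assuming a basic 3-address
 * frame, an 8-byte LLC/SNAP header and a 14-byte 802.3 header:
 *   [RX TLVs][24B 802.11 hdr][8B LLC/SNAP][payload]
 * becomes
 *   [RX TLVs][14B 802.3 hdr][payload]
 * i.e. the pull of (tlv + hdrsize + 8 - 14) bytes positions the
 * ethernet header exactly where the translated addresses and saved
 * ethertype are then written, and the saved TLVs are pushed back on.
 */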
1108 
1109 #ifdef RX_DEFRAG_DO_NOT_REINJECT
1110 /**
1111  * dp_rx_defrag_deliver() - Deliver defrag packet to stack
1112  * @txrx_peer: Pointer to the peer
1113  * @tid: Transmit Identifier
1114  * @head: Nbuf to be delivered
1115  *
1116  * Return: None
1117  */
1118 static inline void dp_rx_defrag_deliver(struct dp_txrx_peer *txrx_peer,
1119 					unsigned int tid,
1120 					qdf_nbuf_t head)
1121 {
1122 	struct dp_vdev *vdev = txrx_peer->vdev;
1123 	struct dp_soc *soc = vdev->pdev->soc;
1124 	qdf_nbuf_t deliver_list_head = NULL;
1125 	qdf_nbuf_t deliver_list_tail = NULL;
1126 	uint8_t *rx_tlv_hdr;
1127 
1128 	rx_tlv_hdr = qdf_nbuf_data(head);
1129 
1130 	QDF_NBUF_CB_RX_VDEV_ID(head) = vdev->vdev_id;
1131 	qdf_nbuf_set_tid_val(head, tid);
1132 	qdf_nbuf_pull_head(head, soc->rx_pkt_tlv_size);
1133 
1134 	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
1135 			  head);
1136 	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, deliver_list_head,
1137 			       deliver_list_tail);
1138 }
1139 
1140 /**
1141  * dp_rx_defrag_reo_reinject() - Reinject the fragment chain back into REO
1142  * @txrx_peer: Pointer to the peer
1143  * @tid: Traffic Identifier (TID)
1144  * @head: Buffer to be reinjected back
1145  *
1146  * Reinject the fragment chain back into REO
1147  *
1148  * Return: QDF_STATUS
1149  */
1150 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1151 					    unsigned int tid, qdf_nbuf_t head)
1152 {
1153 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1154 
1155 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1156 
1157 	dp_rx_defrag_deliver(txrx_peer, tid, head);
1158 	rx_reorder_array_elem->head = NULL;
1159 	rx_reorder_array_elem->tail = NULL;
1160 	dp_rx_return_head_frag_desc(txrx_peer, tid);
1161 
1162 	return QDF_STATUS_SUCCESS;
1163 }
1164 #else
1165 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1166 /**
1167  * dp_rx_reinject_ring_record_entry() - Record reinject ring history
1168  * @soc: Datapath soc structure
1169  * @paddr: paddr of the buffer reinjected to SW2REO ring
1170  * @sw_cookie: SW cookie of the buffer reinjected to SW2REO ring
1171  * @rbm: Return buffer manager of the buffer reinjected to SW2REO ring
1172  *
1173  * Return: None
1174  */
1175 static inline void
1176 dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1177 				 uint32_t sw_cookie, uint8_t rbm)
1178 {
1179 	struct dp_buf_info_record *record;
1180 	uint32_t idx;
1181 
1182 	if (qdf_unlikely(!soc->rx_reinject_ring_history))
1183 		return;
1184 
1185 	idx = dp_history_get_next_index(&soc->rx_reinject_ring_history->index,
1186 					DP_RX_REINJECT_HIST_MAX);
1187 
1188 	/* No NULL check needed for record since it's an array */
1189 	record = &soc->rx_reinject_ring_history->entry[idx];
1190 
1191 	record->timestamp = qdf_get_log_timestamp();
1192 	record->hbi.paddr = paddr;
1193 	record->hbi.sw_cookie = sw_cookie;
1194 	record->hbi.rbm = rbm;
1195 }
1196 #else
1197 static inline void
1198 dp_rx_reinject_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1199 				 uint32_t sw_cookie, uint8_t rbm)
1200 {
1201 }
1202 #endif
1203 
1204 /**
1205  * dp_rx_defrag_reo_reinject() - Reinject the fragment chain back into REO
1206  * @txrx_peer: Pointer to the txrx_peer
1207  * @tid: Traffic Identifier (TID)
1208  * @head: Buffer to be reinjected back
1209  *
1210  * Reinject the fragment chain back into REO
1211  *
1212  * Return: QDF_STATUS
1213  */
1214 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
1215 					    unsigned int tid, qdf_nbuf_t head)
1216 {
1217 	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
1218 	struct dp_soc *soc = pdev->soc;
1219 	struct hal_buf_info buf_info;
1220 	struct hal_buf_info temp_buf_info;
1221 	void *link_desc_va;
1222 	void *msdu0, *msdu_desc_info;
1223 	void *ent_ring_desc, *ent_mpdu_desc_info, *ent_qdesc_addr;
1224 	void *dst_mpdu_desc_info;
1225 	uint64_t dst_qdesc_addr;
1226 	qdf_dma_addr_t paddr;
1227 	uint32_t nbuf_len, seq_no, dst_ind;
1228 	uint32_t ret, cookie;
1229 	hal_ring_desc_t dst_ring_desc =
1230 		txrx_peer->rx_tid[tid].dst_ring_desc;
1231 	hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
1232 	struct dp_rx_desc *rx_desc = txrx_peer->rx_tid[tid].head_frag_desc;
1233 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1234 						txrx_peer->rx_tid[tid].array;
1235 	qdf_nbuf_t nbuf_head;
1236 	struct rx_desc_pool *rx_desc_pool = NULL;
1237 	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(dst_ring_desc);
1238 	uint8_t rx_defrag_rbm_id = dp_rx_get_defrag_bm_id(soc);
1239 
1240 	/* do duplicate link desc address check */
1241 	dp_rx_link_desc_refill_duplicate_check(
1242 				soc,
1243 				&soc->last_op_info.reo_reinject_link_desc,
1244 				buf_addr_info);
1245 
1246 	nbuf_head = dp_ipa_handle_rx_reo_reinject(soc, head);
1247 	if (qdf_unlikely(!nbuf_head)) {
1248 		dp_err_rl("IPA RX REO reinject failed");
1249 		return QDF_STATUS_E_FAILURE;
1250 	}
1251 
1252 	/* update new allocated skb in case IPA is enabled */
1253 	if (nbuf_head != head) {
1254 		head = nbuf_head;
1255 		rx_desc->nbuf = head;
1256 		rx_reorder_array_elem->head = head;
1257 	}
1258 
1259 	ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
1260 	if (!ent_ring_desc) {
1261 		dp_err_rl("HAL src ring next entry NULL");
1262 		return QDF_STATUS_E_FAILURE;
1263 	}
1264 
1265 	hal_rx_reo_buf_paddr_get(soc->hal_soc, dst_ring_desc, &buf_info);
1266 
1267 	/* buffer_addr_info is the first element of ring_desc */
1268 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)dst_ring_desc,
1269 				  &buf_info);
1270 
1271 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1272 
1273 	qdf_assert_always(link_desc_va);
1274 
1275 	msdu0 = hal_rx_msdu0_buffer_addr_lsb(soc->hal_soc, link_desc_va);
1276 	nbuf_len = qdf_nbuf_len(head) - soc->rx_pkt_tlv_size;
1277 
1278 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, OWNER, UNI_DESC_OWNER_SW);
1279 	HAL_RX_UNIFORM_HDR_SET(link_desc_va, BUFFER_TYPE,
1280 			UNI_DESC_BUF_TYPE_RX_MSDU_LINK);
1281 
1282 	/* msdu reconfig */
1283 	msdu_desc_info = hal_rx_msdu_desc_info_ptr_get(soc->hal_soc, msdu0);
1284 
1285 	dst_ind = hal_rx_msdu_reo_dst_ind_get(soc->hal_soc, link_desc_va);
1286 
1287 	qdf_mem_zero(msdu_desc_info, sizeof(struct rx_msdu_desc_info));
1288 
1289 	hal_msdu_desc_info_set(soc->hal_soc, msdu_desc_info, dst_ind, nbuf_len);
1290 
1291 	/* change RX TLV's */
1292 	hal_rx_tlv_msdu_len_set(soc->hal_soc, qdf_nbuf_data(head), nbuf_len);
1293 
1294 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)msdu0,
1295 				  &temp_buf_info);
1296 
1297 	cookie = temp_buf_info.sw_cookie;
1298 	rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
1299 
1300 	/* map the nbuf before reinject it into HW */
1301 	ret = qdf_nbuf_map_nbytes_single(soc->osdev, head,
1302 					 QDF_DMA_FROM_DEVICE,
1303 					 rx_desc_pool->buf_size);
1304 	if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1305 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1306 				"%s: nbuf map failed !", __func__);
1307 		return QDF_STATUS_E_FAILURE;
1308 	}
1309 
1310 	dp_ipa_handle_rx_buf_smmu_mapping(soc, head,
1311 					  rx_desc_pool->buf_size, true,
1312 					  __func__, __LINE__);
1313 	dp_audio_smmu_map(soc->osdev,
1314 			  qdf_mem_paddr_from_dmaaddr(soc->osdev,
1315 						     QDF_NBUF_CB_PADDR(head)),
1316 			  QDF_NBUF_CB_PADDR(head), rx_desc_pool->buf_size);
1317 
1318 	/*
1319 	 * The buffer was unmapped in the rx frag handler and
1320 	 * rx_desc->unmapped was set to 1. Reset it back to 0 before
1321 	 * the defrag frame is reinjected.
1322 	 */
1323 	rx_desc->unmapped = 0;
1324 
1325 	paddr = qdf_nbuf_get_frag_paddr(head, 0);
1326 
1327 	ret = dp_check_paddr(soc, &head, &paddr, rx_desc_pool);
1328 
1329 	if (ret == QDF_STATUS_E_FAILURE) {
1330 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1331 				"%s: x86 check failed !", __func__);
1332 		return QDF_STATUS_E_FAILURE;
1333 	}
1334 
1335 	hal_rxdma_buff_addr_info_set(soc->hal_soc, msdu0, paddr, cookie,
1336 				     rx_defrag_rbm_id);
1337 
1338 	/* Let's fill the REO entrance ring now */
1339 	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
1340 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1341 		"HAL RING Access For REO entrance SRNG Failed: %pK",
1342 		hal_srng);
1343 
1344 		return QDF_STATUS_E_FAILURE;
1345 	}
1346 
1347 	dp_rx_reinject_ring_record_entry(soc, paddr, cookie,
1348 					 rx_defrag_rbm_id);
1349 	paddr = (uint64_t)buf_info.paddr;
1350 	/* buf addr */
1351 	hal_rxdma_buff_addr_info_set(soc->hal_soc, ent_ring_desc, paddr,
1352 				     buf_info.sw_cookie,
1353 				     soc->idle_link_bm_id);
1354 	/* mpdu desc info */
1355 	ent_mpdu_desc_info = hal_ent_mpdu_desc_info(soc->hal_soc,
1356 						    ent_ring_desc);
1357 	dst_mpdu_desc_info = hal_dst_mpdu_desc_info(soc->hal_soc,
1358 						    dst_ring_desc);
1359 
1360 	qdf_mem_copy(ent_mpdu_desc_info, dst_mpdu_desc_info,
1361 				sizeof(struct rx_mpdu_desc_info));
1362 	qdf_mem_zero(ent_mpdu_desc_info, sizeof(uint32_t));
1363 
1364 	seq_no = hal_rx_get_rx_sequence(soc->hal_soc, rx_desc->rx_buf_start);
1365 
1366 	hal_mpdu_desc_info_set(soc->hal_soc, ent_ring_desc, ent_mpdu_desc_info,
1367 			       seq_no);
1368 	/* qdesc addr */
1369 	ent_qdesc_addr = hal_get_reo_ent_desc_qdesc_addr(soc->hal_soc,
1370 						(uint8_t *)ent_ring_desc);
1371 
1372 	dst_qdesc_addr = soc->arch_ops.get_reo_qdesc_addr(
1373 						soc->hal_soc,
1374 						(uint8_t *)dst_ring_desc,
1375 						qdf_nbuf_data(head),
1376 						txrx_peer, tid);
1377 
1378 	qdf_mem_copy(ent_qdesc_addr, &dst_qdesc_addr, 5);
1379 
1380 	hal_set_reo_ent_desc_reo_dest_ind(soc->hal_soc,
1381 					  (uint8_t *)ent_ring_desc, dst_ind);
1382 
1383 	hal_srng_access_end(soc->hal_soc, hal_srng);
1384 
1385 	DP_STATS_INC(soc, rx.reo_reinject, 1);
1386 	dp_debug("reinjection done !");
1387 	return QDF_STATUS_SUCCESS;
1388 }
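/*
 * Note on the reinjection above: the reassembled nbuf is re-described in
 * the original MSDU link descriptor (msdu0), remapped for DMA, and an
 * SW2REO entrance-ring descriptor is rebuilt from the saved
 * destination-ring descriptor (MPDU desc info, REO queue descriptor
 * address, destination indication) so that REO processes the MPDU as if
 * it had arrived whole.
 */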
1389 #endif
1390 
1391 /**
1392  * dp_rx_defrag_gcmp_demic() - Remove MIC information from GCMP fragment
1393  * @soc: Datapath soc structure
1394  * @nbuf: Pointer to the fragment buffer
1395  * @hdrlen: 802.11 header length
1396  *
1397  * Remove MIC information from GCMP fragment
1398  *
1399  * Return: QDF_STATUS
1400  */
1401 static QDF_STATUS dp_rx_defrag_gcmp_demic(struct dp_soc *soc, qdf_nbuf_t nbuf,
1402 					  uint16_t hdrlen)
1403 {
1404 	uint8_t *ivp, *orig_hdr;
1405 	int rx_desc_len = soc->rx_pkt_tlv_size;
1406 
1407 	/* start of the 802.11 header */
1408 	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
1409 
1410 	/*
1411 	 * GCMP header is located after 802.11 header and EXTIV
1412 	 * field should always be set to 1 for GCMP protocol.
1413 	 */
1414 	ivp = orig_hdr + hdrlen;
1415 	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
1416 		return QDF_STATUS_E_DEFRAG_ERROR;
1417 
1418 	qdf_nbuf_trim_tail(nbuf, dp_f_gcmp.ic_trailer);
1419 
1420 	return QDF_STATUS_SUCCESS;
1421 }
1422 
1423 QDF_STATUS dp_rx_defrag(struct dp_txrx_peer *txrx_peer, unsigned int tid,
1424 			qdf_nbuf_t frag_list_head,
1425 			qdf_nbuf_t frag_list_tail)
1426 {
1427 	qdf_nbuf_t tmp_next;
1428 	qdf_nbuf_t cur = frag_list_head, msdu;
1429 	uint32_t index, tkip_demic = 0;
1430 	uint16_t hdr_space;
1431 	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
1432 	struct dp_vdev *vdev = txrx_peer->vdev;
1433 	struct dp_soc *soc = vdev->pdev->soc;
1434 	uint8_t status = 0;
1435 
1436 	if (!cur)
1437 		return QDF_STATUS_E_DEFRAG_ERROR;
1438 
1439 	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
1440 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, cur) ?
1441 		dp_sec_mcast : dp_sec_ucast;
1442 
1443 	/* Remove FCS from all fragments */
1444 	while (cur) {
1445 		tmp_next = qdf_nbuf_next(cur);
1446 		qdf_nbuf_set_next(cur, NULL);
1447 		qdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
1448 		qdf_nbuf_set_next(cur, tmp_next);
1449 		cur = tmp_next;
1450 	}
1451 	cur = frag_list_head;
1452 
1453 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1454 		  "%s: index %d Security type: %d", __func__,
1455 		  index, txrx_peer->security[index].sec_type);
1456 
1457 	switch (txrx_peer->security[index].sec_type) {
1458 	case cdp_sec_type_tkip:
1459 		tkip_demic = 1;
1460 		fallthrough;
1461 	case cdp_sec_type_tkip_nomic:
1462 		while (cur) {
1463 			tmp_next = qdf_nbuf_next(cur);
1464 			if (dp_rx_defrag_tkip_decap(soc, cur, hdr_space)) {
1465 
1466 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1467 					QDF_TRACE_LEVEL_ERROR,
1468 					"dp_rx_defrag: TKIP decap failed");
1469 
1470 				return QDF_STATUS_E_DEFRAG_ERROR;
1471 			}
1472 			cur = tmp_next;
1473 		}
1474 
1475 		/* If success, increment header to be stripped later */
1476 		hdr_space += dp_f_tkip.ic_header;
1477 		break;
1478 
1479 	case cdp_sec_type_aes_ccmp:
1480 		while (cur) {
1481 			tmp_next = qdf_nbuf_next(cur);
1482 			if (dp_rx_defrag_ccmp_demic(soc, cur, hdr_space)) {
1483 
1484 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1485 					QDF_TRACE_LEVEL_ERROR,
1486 					"dp_rx_defrag: CCMP demic failed");
1487 
1488 				return QDF_STATUS_E_DEFRAG_ERROR;
1489 			}
1490 			if (dp_rx_defrag_ccmp_decap(soc, cur, hdr_space)) {
1491 
1492 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1493 					QDF_TRACE_LEVEL_ERROR,
1494 					"dp_rx_defrag: CCMP decap failed");
1495 
1496 				return QDF_STATUS_E_DEFRAG_ERROR;
1497 			}
1498 			cur = tmp_next;
1499 		}
1500 
1501 		/* If success, increment header to be stripped later */
1502 		hdr_space += dp_f_ccmp.ic_header;
1503 		break;
1504 
1505 	case cdp_sec_type_wep40:
1506 	case cdp_sec_type_wep104:
1507 	case cdp_sec_type_wep128:
1508 		while (cur) {
1509 			tmp_next = qdf_nbuf_next(cur);
1510 			if (dp_rx_defrag_wep_decap(soc, cur, hdr_space)) {
1511 
1512 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1513 					QDF_TRACE_LEVEL_ERROR,
1514 					"dp_rx_defrag: WEP decap failed");
1515 
1516 				return QDF_STATUS_E_DEFRAG_ERROR;
1517 			}
1518 			cur = tmp_next;
1519 		}
1520 
1521 		/* If success, increment header to be stripped later */
1522 		hdr_space += dp_f_wep.ic_header;
1523 		break;
1524 	case cdp_sec_type_aes_gcmp:
1525 	case cdp_sec_type_aes_gcmp_256:
1526 		while (cur) {
1527 			tmp_next = qdf_nbuf_next(cur);
1528 			if (dp_rx_defrag_gcmp_demic(soc, cur, hdr_space)) {
1529 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1530 					  QDF_TRACE_LEVEL_ERROR,
1531 					  "dp_rx_defrag: GCMP demic failed");
1532 
1533 				return QDF_STATUS_E_DEFRAG_ERROR;
1534 			}
1535 			cur = tmp_next;
1536 		}
1537 
1538 		hdr_space += dp_f_gcmp.ic_header;
1539 		break;
1540 	default:
1541 		break;
1542 	}
1543 
1544 	if (tkip_demic) {
1545 		msdu = frag_list_head;
1546 		qdf_mem_copy(key,
1547 			     &txrx_peer->security[index].michael_key[0],
1548 			     IEEE80211_WEP_MICLEN);
1549 		status = dp_rx_defrag_tkip_demic(soc, key, msdu,
1550 						 soc->rx_pkt_tlv_size +
1551 						 hdr_space);
1552 
1553 		if (status) {
1554 			dp_rx_defrag_err(vdev, frag_list_head);
1555 
1556 			QDF_TRACE(QDF_MODULE_ID_TXRX,
1557 				  QDF_TRACE_LEVEL_ERROR,
1558 				  "%s: TKIP demic failed status %d",
1559 				   __func__, status);
1560 
1561 			return QDF_STATUS_E_DEFRAG_ERROR;
1562 		}
1563 	}
1564 
1565 	/* Convert the header to 802.3 header */
1566 	dp_rx_defrag_nwifi_to_8023(soc, txrx_peer, tid, frag_list_head,
1567 				   hdr_space);
1568 	if (qdf_nbuf_next(frag_list_head)) {
1569 		if (dp_rx_construct_fraglist(txrx_peer, tid, frag_list_head,
1570 					     hdr_space))
1571 			return QDF_STATUS_E_DEFRAG_ERROR;
1572 	}
1573 
1574 	return QDF_STATUS_SUCCESS;
1575 }
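/*
 * Per-cipher summary of dp_rx_defrag() above (header bytes accumulate in
 * hdr_space and are stripped later; sizes assume the standard
 * definitions used by the cipher tables at the top of this file):
 *   TKIP:      trim 4B ICV per fragment, Michael MIC check over the
 *              chain, hdr_space += 8 (IV/ExtIV)
 *   CCMP:      trim 8B MIC, ExtIV check, hdr_space += 8
 *   WEP:       shift the header over the 4B IV, trim 4B ICV,
 *              hdr_space += 4
 *   GCMP/256:  trim 16B MIC, hdr_space += 8
 * On success the chain is converted to 802.3 and, for multi-fragment
 * chains, glued into a fraglist with per-fragment PN checks.
 */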
1576 
1577 void dp_rx_defrag_cleanup(struct dp_txrx_peer *txrx_peer, unsigned int tid)
1578 {
1579 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
1580 				txrx_peer->rx_tid[tid].array;
1581 
1582 	if (rx_reorder_array_elem) {
1583 		/* Free up nbufs */
1584 		dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
1585 		rx_reorder_array_elem->head = NULL;
1586 		rx_reorder_array_elem->tail = NULL;
1587 	} else {
1588 		dp_info("Cleanup self peer %pK and TID %u",
1589 			txrx_peer, tid);
1590 	}
1591 
1592 	/* Free up saved ring descriptors */
1593 	dp_rx_clear_saved_desc_info(txrx_peer, tid);
1594 
1595 	txrx_peer->rx_tid[tid].defrag_timeout_ms = 0;
1596 	txrx_peer->rx_tid[tid].curr_frag_num = 0;
1597 	txrx_peer->rx_tid[tid].curr_seq_num = 0;
1598 }
1599 
1600 #ifdef DP_RX_DEFRAG_ADDR1_CHECK_WAR
1601 #ifdef WLAN_FEATURE_11BE_MLO
1602 /**
1603  * dp_rx_defrag_vdev_mac_addr_cmp() - function to check whether mac address
1604  *				matches VDEV mac
1605  * @vdev: dp_vdev object of the VDEV on which this data packet is received
1606  * @mac_addr: Address to compare
1607  *
1608  * Return: 1 if the mac address matches,
1609  *         0 if this frame is not correctly destined to this VDEV/MLD
1610  */
1611 static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1612 					  uint8_t *mac_addr)
1613 {
1614 	return ((qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1615 			     QDF_MAC_ADDR_SIZE) == 0) ||
1616 		(qdf_mem_cmp(mac_addr, &vdev->mld_mac_addr.raw[0],
1617 			     QDF_MAC_ADDR_SIZE) == 0));
1618 }
1619 
1620 #else
1621 static int dp_rx_defrag_vdev_mac_addr_cmp(struct dp_vdev *vdev,
1622 					  uint8_t *mac_addr)
1623 {
1624 	return (qdf_mem_cmp(mac_addr, &vdev->mac_addr.raw[0],
1625 			    QDF_MAC_ADDR_SIZE) == 0);
1626 }
1627 #endif
1628 
1629 static bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1630 				     struct dp_vdev *vdev,
1631 				     uint8_t *rx_tlv_hdr)
1632 {
1633 	union dp_align_mac_addr mac_addr;
1634 
1635 	/* If address1 is not valid discard the fragment */
1636 	if (hal_rx_mpdu_get_addr1(soc->hal_soc, rx_tlv_hdr,
1637 				  &mac_addr.raw[0]) != QDF_STATUS_SUCCESS) {
1638 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1639 		return false;
1640 	}
1641 
1642 	/* WAR suggested by HW team to avoid crashing in case of a packet
1643 	 * corruption issue.
1644 	 *
1645 	 * The recipe is to compare the VDEV mac or MLD mac address with
1646 	 * ADDR1; on a mismatch, treat the packet as corrupted and do
1647 	 * not process it further.
1648 	 */
1649 	if (!dp_rx_defrag_vdev_mac_addr_cmp(vdev,
1650 					    &mac_addr.raw[0])) {
1651 		DP_STATS_INC(soc, rx.err.defrag_ad1_invalid, 1);
1652 		return false;
1653 	}
1654 
1655 	return true;
1656 }
1657 #else
1658 static inline bool dp_rx_defrag_addr1_check(struct dp_soc *soc,
1659 					    struct dp_vdev *vdev,
1660 					    uint8_t *rx_tlv_hdr)
1661 {
1662 
1663 	return true;
1664 }
1665 #endif
1666 
1667 QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
1668 				      struct dp_txrx_peer *txrx_peer,
1669 				      uint16_t tid,
1670 				      uint16_t rxseq, qdf_nbuf_t nbuf)
1671 {
1672 	struct dp_rx_tid_defrag *rx_tid = &txrx_peer->rx_tid[tid];
1673 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1674 	uint8_t all_frag_present;
1675 	uint32_t msdu_len;
1676 	QDF_STATUS status;
1677 
1678 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1679 
1680 	/*
1681 	 * HW may fill in an unexpected peer_id in the RX PKT TLV. If the
1682 	 * peer for that peer_id happens to be valid by coincidence but
1683 	 * never went through dp_peer_rx_init() (e.g. a SAP vdev self
1684 	 * peer), accessing rx_reorder_array_elem would be invalid.
1685 	 */
1686 	if (!rx_reorder_array_elem) {
1687 		dp_verbose_debug(
1688 			"peer id:%d drop rx frame!",
1689 			txrx_peer->peer_id);
1690 		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
1691 		dp_rx_nbuf_free(nbuf);
1692 		goto fail;
1693 	}
1694 
1695 	if (rx_reorder_array_elem->head &&
1696 	    rxseq != rx_tid->curr_seq_num) {
1697 		/* Drop the stored fragments if an out-of-sequence
1698 		 * fragment is received
1699 		 */
1700 		dp_rx_reorder_flush_frag(txrx_peer, tid);
1701 
1702 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1703 			  "%s: out of sequence fragment, flushing TID %d Seq# %d",
1704 			  __func__, tid, rxseq);
1705 		dp_rx_nbuf_free(nbuf);
1706 		goto fail;
1707 	}
1708 
1709 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1710 						  qdf_nbuf_data(nbuf));
1711 
1712 	qdf_nbuf_set_pktlen(nbuf, (msdu_len + soc->rx_pkt_tlv_size));
1713 
1714 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
1715 					      &rx_reorder_array_elem->head,
1716 					      &rx_reorder_array_elem->tail,
1717 					      nbuf, &all_frag_present);
1718 
1719 	if (QDF_IS_STATUS_ERROR(status)) {
1720 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1721 			  "%s Fragment insert failed", __func__);
1722 
1723 		goto fail;
1724 	}
1725 
1726 	if (soc->rx.flags.defrag_timeout_check)
1727 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
1728 
1729 	if (!all_frag_present) {
1730 		uint32_t now_ms =
1731 			qdf_system_ticks_to_msecs(qdf_system_ticks());
1732 
1733 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
1734 			now_ms + soc->rx.defrag.timeout_ms;
1735 
1736 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
1737 
1738 		return QDF_STATUS_SUCCESS;
1739 	}
1740 
1741 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
1742 			      rx_reorder_array_elem->tail);
1743 
1744 	if (QDF_IS_STATUS_ERROR(status)) {
1745 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1746 			  "%s Fragment processing failed", __func__);
1747 
1748 		dp_rx_return_head_frag_desc(txrx_peer, tid);
1749 		dp_rx_defrag_cleanup(txrx_peer, tid);
1750 
1751 		goto fail;
1752 	}
1753 
1754 	/* Re-inject the fragments back to REO for further processing */
1755 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
1756 					   rx_reorder_array_elem->head);
1757 	if (QDF_IS_STATUS_SUCCESS(status)) {
1758 		rx_reorder_array_elem->head = NULL;
1759 		rx_reorder_array_elem->tail = NULL;
1760 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1761 			  "%s: Frag seq successfully reinjected",
1762 			  __func__);
1763 	} else {
1764 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1765 			  "%s: Frag seq reinjection failed", __func__);
1766 		dp_rx_return_head_frag_desc(txrx_peer, tid);
1767 	}
1768 
1769 	dp_rx_defrag_cleanup(txrx_peer, tid);
1770 	return QDF_STATUS_SUCCESS;
1771 
1772 fail:
1773 	return QDF_STATUS_E_DEFRAG_ERROR;
1774 }
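
/*
 * Illustrative sketch (not driver code): the waitlist arming above stores
 * an absolute expiry (current ticks converted to ms plus the configured
 * defrag timeout); a periodic flush pass then compares "now" against it.
 * A standalone model with hypothetical names, using a wrap-safe compare:
 */
#include <stdbool.h>
#include <stdint.h>

struct frag_wait_entry {
	uint32_t defrag_timeout_ms;	/* absolute expiry, in ms */
};

static void frag_wait_arm(struct frag_wait_entry *e,
			  uint32_t now_ms, uint32_t timeout_ms)
{
	e->defrag_timeout_ms = now_ms + timeout_ms;
}

static bool frag_wait_expired(const struct frag_wait_entry *e,
			      uint32_t now_ms)
{
	/* signed difference stays correct across 32-bit ms wraparound */
	return (int32_t)(now_ms - e->defrag_timeout_ms) >= 0;
}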
1775 
1776 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
1777 /**
1778  * dp_rx_defrag_save_info_from_ring_desc() - Save info from REO ring descriptor
1779  * @soc: Pointer to the SOC data structure
1780  * @ring_desc: Pointer to the dst ring descriptor
1781  * @rx_desc: Pointer to rx descriptor
1782  * @txrx_peer: Pointer to the peer
1783  * @tid: Traffic Identifier
1784  *
1785  * Return: QDF_STATUS
1786  */
1787 static QDF_STATUS
1788 dp_rx_defrag_save_info_from_ring_desc(struct dp_soc *soc,
1789 				      hal_ring_desc_t ring_desc,
1790 				      struct dp_rx_desc *rx_desc,
1791 				      struct dp_txrx_peer *txrx_peer,
1792 				      unsigned int tid)
1793 {
1794 	void *dst_ring_desc;
1795 
1796 	dst_ring_desc = qdf_mem_malloc(hal_srng_get_entrysize(soc->hal_soc,
1797 							      REO_DST));
1798 
1799 	if (!dst_ring_desc) {
1800 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1801 			  "%s: Memory alloc failed!", __func__);
1802 		QDF_ASSERT(0);
1803 		return QDF_STATUS_E_NOMEM;
1804 	}
1805 
1806 	qdf_mem_copy(dst_ring_desc, ring_desc,
1807 		     hal_srng_get_entrysize(soc->hal_soc, REO_DST));
1808 
1809 	txrx_peer->rx_tid[tid].dst_ring_desc = dst_ring_desc;
1810 	txrx_peer->rx_tid[tid].head_frag_desc = rx_desc;
1811 
1812 	return QDF_STATUS_SUCCESS;
1813 }
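
/*
 * Illustrative sketch (not driver code): saving the REO ring descriptor
 * is an entry-size snapshot, heap-allocated so it outlives the SRNG entry
 * it was copied from. A minimal standalone equivalent, with hypothetical
 * names:
 */
#include <stdlib.h>
#include <string.h>

static void *ring_entry_snapshot(const void *entry, size_t entry_size)
{
	void *copy = malloc(entry_size);

	if (copy)
		memcpy(copy, entry, entry_size);

	return copy;	/* caller frees it once the frag sequence completes */
}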
1814 
1815 /**
1816  * dp_rx_defrag_store_fragment() - Store incoming fragments
1817  * @soc: Pointer to the SOC data structure
1818  * @ring_desc: Pointer to the ring descriptor
1819  * @head: Pointer to the head of the free RX descriptor list
1820  * @tail: Pointer to the tail of the free RX descriptor list
1821  * @mpdu_desc_info: MPDU descriptor info
1822  * @tid: Traffic Identifier
1823  * @rx_desc: Pointer to rx descriptor
1824  * @rx_bfs: Pointer to the number of RX buffers consumed
1825  *
1826  * Return: QDF_STATUS
1827  */
1828 static QDF_STATUS
1829 dp_rx_defrag_store_fragment(struct dp_soc *soc,
1830 			    hal_ring_desc_t ring_desc,
1831 			    union dp_rx_desc_list_elem_t **head,
1832 			    union dp_rx_desc_list_elem_t **tail,
1833 			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1834 			    unsigned int tid, struct dp_rx_desc *rx_desc,
1835 			    uint32_t *rx_bfs)
1836 {
1837 	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
1838 	struct dp_pdev *pdev;
1839 	struct dp_txrx_peer *txrx_peer = NULL;
1840 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1841 	uint16_t peer_id;
1842 	uint8_t fragno, more_frag, all_frag_present = 0;
1843 	uint16_t rxseq = mpdu_desc_info->mpdu_seq;
1844 	QDF_STATUS status;
1845 	struct dp_rx_tid_defrag *rx_tid;
1846 	uint8_t mpdu_sequence_control_valid;
1847 	uint8_t mpdu_frame_control_valid;
1848 	qdf_nbuf_t frag = rx_desc->nbuf;
1849 	uint32_t msdu_len;
1850 
1851 	if (qdf_nbuf_len(frag) > 0) {
1852 		dp_info("Dropping unexpected packet with skb_len: %d "
1853 			"data len: %d cookie: %d",
1854 			(uint32_t)qdf_nbuf_len(frag), frag->data_len,
1855 			rx_desc->cookie);
1856 		DP_STATS_INC(soc, rx.rx_frag_err_len_error, 1);
1857 		goto discard_frag;
1858 	}
1859 
1860 	if (dp_rx_buffer_pool_refill(soc, frag, rx_desc->pool_id)) {
1861 		/* fragment queued back to the pool, free the link desc */
1862 		goto err_free_desc;
1863 	}
1864 
1865 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
1866 						  rx_desc->rx_buf_start);
1867 
1868 	qdf_nbuf_set_pktlen(frag, (msdu_len + soc->rx_pkt_tlv_size));
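	/* Clear any stale ext (frag) list on this nbuf so the fragment
	 * is handled as a single flat buffer from here on.
	 */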
1869 	qdf_nbuf_append_ext_list(frag, NULL, 0);
1870 
1871 	/* Check if the packet is from a valid peer */
1872 	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1873 					       mpdu_desc_info->peer_meta_data);
1874 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle,
1875 					       DP_MOD_ID_RX_ERR);
1876 
1877 	if (!txrx_peer) {
1878 		/* We should not receive anything from an unknown peer;
1879 		 * however, that can happen in monitor mode.
1880 		 * We don't need to handle that here.
1881 		 */
1882 		dp_info_rl("Unknown peer with peer_id %d, dropping fragment",
1883 			   peer_id);
1884 		DP_STATS_INC(soc, rx.rx_frag_err_no_peer, 1);
1885 		goto discard_frag;
1886 	}
1887 
1888 	if (tid >= DP_MAX_TIDS) {
1889 		dp_info("TID out of bounds: %d", tid);
1890 		qdf_assert_always(0);
1891 		goto discard_frag;
1892 	}
1893 
1894 	if (!dp_rx_defrag_addr1_check(soc, txrx_peer->vdev,
1895 				      rx_desc->rx_buf_start)) {
1896 		dp_info("Invalid address 1");
1897 		goto discard_frag;
1898 	}
1899 
1900 	mpdu_sequence_control_valid =
1901 		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
1902 						       rx_desc->rx_buf_start);
1903 
1904 	/* Invalid MPDU sequence control field, MPDU is of no use */
1905 	if (!mpdu_sequence_control_valid) {
1906 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1907 			"Invalid MPDU seq control field, dropping MPDU");
1908 
1909 		qdf_assert(0);
1910 		goto discard_frag;
1911 	}
1912 
1913 	mpdu_frame_control_valid =
1914 		hal_rx_get_mpdu_frame_control_valid(soc->hal_soc,
1915 						    rx_desc->rx_buf_start);
1916 
1917 	/* Invalid frame control field */
1918 	if (!mpdu_frame_control_valid) {
1919 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1920 			"Invalid frame control field, dropping MPDU");
1921 
1922 		qdf_assert(0);
1923 		goto discard_frag;
1924 	}
1925 
1926 	/* Current mpdu sequence */
1927 	more_frag = dp_rx_frag_get_more_frag_bit(soc, rx_desc->rx_buf_start);
1928 
1929 	/* HW does not populate the fragment number as of now;
1930 	 * it has to be retrieved from the 802.11 header.
1931 	 */
1932 	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc->rx_buf_start);
1933 
1934 	pdev = txrx_peer->vdev->pdev;
1935 	rx_tid = &txrx_peer->rx_tid[tid];
1936 
1937 	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, frag,
1938 			      QDF_TX_RX_STATUS_OK, false);
1939 
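	/* The per-TID defrag lock serializes this store path against the
	 * timeout/flush path operating on the same reorder array.
	 */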
1940 	qdf_spin_lock_bh(&rx_tid->defrag_tid_lock);
1941 	rx_reorder_array_elem = txrx_peer->rx_tid[tid].array;
1942 	if (!rx_reorder_array_elem) {
1943 		dp_err_rl("Rcvd Fragmented pkt before tid setup for peer %pK",
1944 			  txrx_peer);
1945 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1946 		goto discard_frag;
1947 	}
1948 
1949 	/*
1950 	 * !more_frag: no more fragments to be delivered
1951 	 * !fragno: packet is not fragmented
1952 	 * !rx_reorder_array_elem->head: no saved fragments so far
1953 	 */
1954 	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
1955 		/* We should not get into this situation here.
1956 		 * It means an unfragmented packet with the fragment flag
1957 		 * set was delivered over the REO exception ring, whereas
1958 		 * it would typically follow the normal rx path.
1959 		 */
1960 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1961 			"Rcvd unfragmented pkt on REO Err srng, dropping");
1962 
1963 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1964 		qdf_assert(0);
1965 		goto discard_frag;
1966 	}
1967 
1968 	/* Check if the fragment is for the same sequence or a different one */
1969 	dp_debug("rx_tid %d", tid);
1970 	if (rx_reorder_array_elem->head) {
1971 		dp_debug("rxseq %d", rxseq);
1972 		if (rxseq != rx_tid->curr_seq_num) {
1973 
1974 			dp_debug("mismatch cur_seq %d rxseq %d",
1975 				 rx_tid->curr_seq_num, rxseq);
1976 			/* Drop stored fragments if out of sequence
1977 			 * fragment is received
1978 			 */
1979 			dp_rx_reorder_flush_frag(txrx_peer, tid);
1980 
1981 			DP_STATS_INC(soc, rx.rx_frag_oor, 1);
1982 
1983 			dp_debug("cur rxseq %d", rxseq);
1984 			/*
1985 			 * The sequence number for this fragment becomes the
1986 			 * new sequence number to be processed
1987 			 */
1988 			rx_tid->curr_seq_num = rxseq;
1989 		}
1990 	} else {
1991 		/* Check if we are processing the first fragment; if it
1992 		 * is not the first fragment, discard it.
1993 		 */
1994 		if (fragno) {
1995 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
1996 			goto discard_frag;
1997 		}
1998 		dp_debug("cur rxseq %d", rxseq);
1999 		/* Start of a new sequence */
2000 		dp_rx_defrag_cleanup(txrx_peer, tid);
2001 		rx_tid->curr_seq_num = rxseq;
2002 		/* also store the PN (packet number) */
2003 	}
2004 
2005 	/*
2006 	 * If the earlier sequence was dropped, this will be the fresh start.
2007 	 * Else, continue with next fragment in a given sequence
2008 	 */
2009 	status = dp_rx_defrag_fraglist_insert(txrx_peer, tid,
2010 					      &rx_reorder_array_elem->head,
2011 					      &rx_reorder_array_elem->tail,
2012 					      frag, &all_frag_present);
2013 
2014 	/*
2015 	 * Currently we can have only 6 MSDUs per MPDU. If the current
2016 	 * packet sequence has more than 6 MSDUs for some reason, we will
2017 	 * have to use the next MSDU link descriptor and chain them together
2018 	 * before reinjection.
2019 	 * ring_desc is validated in dp_rx_err_process.
2020 	 */
2021 	if ((fragno == 0) && (status == QDF_STATUS_SUCCESS) &&
2022 			(rx_reorder_array_elem->head == frag)) {
2023 
2024 		status = dp_rx_defrag_save_info_from_ring_desc(soc, ring_desc,
2025 							       rx_desc,
2026 							       txrx_peer, tid);
2027 
2028 		if (status != QDF_STATUS_SUCCESS) {
2029 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2030 				"%s: Unable to store ring desc !", __func__);
2031 			qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2032 			goto discard_frag;
2033 		}
2034 	} else {
2035 		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
2036 		(*rx_bfs)++;
2037 
2038 		/* Return the non-head link desc */
2039 		if (dp_rx_link_desc_return(soc, ring_desc,
2040 					   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2041 		    QDF_STATUS_SUCCESS)
2042 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2043 				  "%s: Failed to return link desc", __func__);
2044 
2045 	}
2046 
2047 	if (pdev->soc->rx.flags.defrag_timeout_check)
2048 		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
2049 
2050 	/* Yet to receive more fragments for this sequence number */
2051 	if (!all_frag_present) {
2052 		uint32_t now_ms =
2053 			qdf_system_ticks_to_msecs(qdf_system_ticks());
2054 
2055 		txrx_peer->rx_tid[tid].defrag_timeout_ms =
2056 			now_ms + pdev->soc->rx.defrag.timeout_ms;
2057 
2058 		dp_rx_defrag_waitlist_add(txrx_peer, tid);
2059 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2060 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2061 
2062 		return QDF_STATUS_SUCCESS;
2063 	}
2064 
2065 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2066 		  "All fragments received for sequence: %d", rxseq);
2067 
2068 	/* Process the fragments */
2069 	status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head,
2070 			      rx_reorder_array_elem->tail);
2071 	if (QDF_IS_STATUS_ERROR(status)) {
2072 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2073 			"Fragment processing failed");
2074 
2075 		dp_rx_add_to_free_desc_list(head, tail,
2076 				txrx_peer->rx_tid[tid].head_frag_desc);
2077 		(*rx_bfs)++;
2078 
2079 		if (dp_rx_link_desc_return(soc,
2080 					txrx_peer->rx_tid[tid].dst_ring_desc,
2081 					HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2082 				QDF_STATUS_SUCCESS)
2083 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2084 					"%s: Failed to return link desc",
2085 					__func__);
2086 		dp_rx_defrag_cleanup(txrx_peer, tid);
2087 		qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2088 		goto end;
2089 	}
2090 
2091 	/* Re-inject the fragments back to REO for further processing */
2092 	status = dp_rx_defrag_reo_reinject(txrx_peer, tid,
2093 					   rx_reorder_array_elem->head);
2094 	if (QDF_IS_STATUS_SUCCESS(status)) {
2095 		rx_reorder_array_elem->head = NULL;
2096 		rx_reorder_array_elem->tail = NULL;
2097 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
2098 			  "Fragmented sequence successfully reinjected");
2099 	} else {
2100 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2101 		"Fragmented sequence reinjection failed");
2102 		dp_rx_return_head_frag_desc(txrx_peer, tid);
2103 	}
2104 
2105 	dp_rx_defrag_cleanup(txrx_peer, tid);
2106 	qdf_spin_unlock_bh(&rx_tid->defrag_tid_lock);
2107 
2108 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2109 
2110 	return QDF_STATUS_SUCCESS;
2111 
2112 discard_frag:
2113 	dp_rx_nbuf_free(frag);
2114 err_free_desc:
2115 	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
2116 	if (dp_rx_link_desc_return(soc, ring_desc,
2117 				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
2118 	    QDF_STATUS_SUCCESS)
2119 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2120 			  "%s: Failed to return link desc", __func__);
2121 	(*rx_bfs)++;
2122 
2123 end:
2124 	if (txrx_peer)
2125 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2126 
2127 	DP_STATS_INC(soc, rx.rx_frag_err, 1);
2128 	return QDF_STATUS_E_DEFRAG_ERROR;
2129 }
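
/*
 * Illustrative sketch (not driver code): since HW does not supply the
 * fragment number, it is parsed from the 802.11 header. In the Sequence
 * Control field (a little-endian u16 at byte 22 of a 3-address data
 * frame), bits 0-3 carry the fragment number and bits 4-15 the sequence
 * number; More Fragments is bit 10 of the Frame Control field. Names
 * below are hypothetical.
 */
#include <stdint.h>

#define WLAN_SEQ_CTRL_OFFSET	22	/* 3-address data frame header */
#define WLAN_FC_MORE_FRAG	0x0400

static uint16_t wlan_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint8_t wlan_frag_number(const uint8_t *hdr80211)
{
	return wlan_le16(hdr80211 + WLAN_SEQ_CTRL_OFFSET) & 0x0F;
}

static uint16_t wlan_seq_number(const uint8_t *hdr80211)
{
	return wlan_le16(hdr80211 + WLAN_SEQ_CTRL_OFFSET) >> 4;
}

static int wlan_more_frag(const uint8_t *hdr80211)
{
	return !!(wlan_le16(hdr80211) & WLAN_FC_MORE_FRAG);
}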
2130 
2131 uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
2132 			   struct hal_rx_mpdu_desc_info *mpdu_desc_info,
2133 			   struct dp_rx_desc *rx_desc,
2134 			   uint8_t *mac_id,
2135 			   uint32_t quota)
2136 {
2137 	uint32_t rx_bufs_used = 0;
2138 	qdf_nbuf_t msdu = NULL;
2139 	uint32_t tid;
2140 	uint32_t rx_bfs = 0;
2141 	struct dp_pdev *pdev;
2142 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2143 	struct rx_desc_pool *rx_desc_pool;
2144 
2145 	qdf_assert(soc);
2146 	qdf_assert(mpdu_desc_info);
2147 	qdf_assert(rx_desc);
2148 
2149 	dp_debug("Number of MSDUs to process, num_msdus: %d",
2150 		 mpdu_desc_info->msdu_count);
2151 
2152 
2153 	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
2154 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2155 			"Not sufficient MSDUs to process");
2156 		return rx_bufs_used;
2157 	}
2158 
2159 	/* all buffers in the MSDU link belong to the same pdev */
2160 	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
2161 	if (!pdev) {
2162 		dp_nofl_debug("pdev is null for pool_id = %d",
2163 			      rx_desc->pool_id);
2164 		return rx_bufs_used;
2165 	}
2166 
2167 	*mac_id = rx_desc->pool_id;
2168 
2169 	msdu = rx_desc->nbuf;
2170 
2171 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2172 
2173 	if (rx_desc->unmapped)
2174 		return rx_bufs_used;
2175 
2176 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
2177 	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
2178 	rx_desc->unmapped = 1;
2179 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2180 
2181 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
2182 
2183 	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_desc->rx_buf_start);
2184 
2185 	/* Process fragment-by-fragment */
2186 	status = dp_rx_defrag_store_fragment(soc, ring_desc,
2187 					     &pdev->free_list_head,
2188 					     &pdev->free_list_tail,
2189 					     mpdu_desc_info,
2190 					     tid, rx_desc, &rx_bfs);
2191 
2192 	if (rx_bfs)
2193 		rx_bufs_used += rx_bfs;
2194 
2195 	if (!QDF_IS_STATUS_SUCCESS(status))
2196 		dp_info_rl("Rx Defrag err seq#:0x%x msdu_count:%d flags:%d",
2197 			   mpdu_desc_info->mpdu_seq,
2198 			   mpdu_desc_info->msdu_count,
2199 			   mpdu_desc_info->mpdu_flags);
2200 
2201 	return rx_bufs_used;
2202 }
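
/*
 * Illustrative sketch (not driver code): dp_rx_frag_handle() returns the
 * number of RX buffers it consumed so the error-ring loop can charge them
 * against its quota and replenish the same count. A hypothetical caller
 * pattern:
 */
#include <stdint.h>

typedef uint32_t (*frag_entry_fn)(void *ring_entry, uint32_t budget);

static uint32_t err_ring_poll(void **entries, int num_entries,
			      uint32_t quota, frag_entry_fn handle)
{
	uint32_t bufs_used = 0;
	int i;

	for (i = 0; i < num_entries && bufs_used < quota; i++)
		bufs_used += handle(entries[i], quota - bufs_used);

	/* caller replenishes bufs_used buffers back to the refill ring */
	return bufs_used;
}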
2203 
2204 #endif /* WLAN_SOFTUMAC_SUPPORT */
2205