xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_rx_reorder.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
/*
 * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== header file includes ===*/
/* generic utilities */
#include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc */

/* external interfaces */
#include <ol_txrx_api.h>        /* ol_txrx_pdev_handle */
#include <ol_txrx_htt_api.h>    /* ol_rx_addba_handler, etc. */
#include <ol_ctrl_txrx_api.h>   /* ol_ctrl_rx_addba_complete, ol_rx_err */
#include <ol_htt_rx_api.h>      /* htt_rx_desc_frame_free */

/* datapath internal interfaces */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_by_id */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
#include <ol_rx_reorder.h>
#include <ol_rx_defrag.h>

/*=== data types and defines ===*/
#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]

/*=== global variables ===*/

static char g_log2ceil[] = {
	1,                      /* 0 -> 1 */
	1,                      /* 1 -> 1 */
	2,                      /* 2 -> 2 */
	4, 4,                   /* 3-4 -> 4 */
	8, 8, 8, 8,             /* 5-8 -> 8 */
	16, 16, 16, 16, 16, 16, 16, 16, /* 9-16 -> 16 */
	32, 32, 32, 32, 32, 32, 32, 32,
	32, 32, 32, 32, 32, 32, 32, 32, /* 17-32 -> 32 */
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64,
	64, 64, 64, 64, 64, 64, 64, 64, /* 33-64 -> 64 */
};
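
/*
 * Illustrative example: the table above is a ceil-to-power-of-2 lookup,
 * so for a negotiated block ack window of, say, 26 MPDUs:
 *     OL_RX_REORDER_ROUND_PWR2(26) == 32
 * The ADDBA handler below then uses win_sz_mask = 32 - 1 = 0x1f, letting
 * the reorder code wrap indices with a cheap "idx & win_sz_mask" instead
 * of a modulo by the window size.
 */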

/*=== function definitions ===*/

/*---*/

#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start)  /* no-op */
#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0  /* n/a */
#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr)   /* n/a */
#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr)   /* n/a */

/*---*/

/* reorder array elements are known to be non-NULL */
#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
	do {								\
		if (tail_msdu) {					\
			qdf_nbuf_set_next(tail_msdu,			\
					  rx_reorder_array_elem->head); \
		}							\
	} while (0)

/* functions called by txrx components */

void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
{
	rx_reorder->win_sz = 1;
	rx_reorder->win_sz_mask = 0;
	rx_reorder->array = &rx_reorder->base;
	rx_reorder->base.head = rx_reorder->base.tail = NULL;
	rx_reorder->tid = tid;
	rx_reorder->defrag_timeout_ms = 0;

	rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
	rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
}
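
/*
 * Illustrative note: with win_sz = 1 and win_sz_mask = 0, every reorder
 * index collapses to slot 0 (idx & 0 == 0) of the single-element "base"
 * array, so frames pass straight through until an ADDBA installs a real
 * block ack window via ol_rx_addba_handler().
 */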

static enum htt_rx_status
ol_rx_reorder_seq_num_check(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer,
			    unsigned int tid, unsigned int seq_num)
{
	unsigned int seq_num_delta;

	/*
	 * Don't check the new seq_num against last_seq
	 * if last_seq is not valid.
	 */
	if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
		return htt_rx_status_ok;

	/*
	 * For duplicate detection, it might be helpful to also check
	 * whether the retry bit is set or not - a strict duplicate packet
	 * should be the one with retry bit set.
	 * However, since many implementations do not set the retry bit,
	 * and since this same function is also used for filtering out
	 * late-arriving frames (frames that arrive after their rx reorder
	 * timeout has expired) which are not retries, don't bother checking
	 * the retry bit for now.
	 */
	/* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
	seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
		(IEEE80211_SEQ_MAX - 1);     /* account for wraparound */

	if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
		return htt_rx_status_err_replay;
		/* or maybe htt_rx_status_err_dup */
	}
	return htt_rx_status_ok;
}
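
/*
 * Worked example of the check above (assuming IEEE80211_SEQ_MAX == 4096,
 * i.e. a 12-bit sequence space, so the replay threshold is 2048):
 *   last_seq = 4094, new seq_num = 2 (sequence space wrapped):
 *       delta = (2 - 1 - 4094) & 4095 = 3      -> in order, ok
 *   last_seq = 100, new seq_num = 100 (duplicate):
 *       delta = (100 - 1 - 100) & 4095 = 4095  -> > 2048, replay
 *   last_seq = 100, new seq_num = 95 (late arrival):
 *       delta = (95 - 1 - 100) & 4095 = 4090   -> > 2048, replay
 */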

/**
 * ol_rx_seq_num_check() - Perform duplicate detection for multicast
 *                         packets, and duplicate detection plus
 *                         out-of-order checking for unicast packets.
 * @pdev:                        Pointer to pdev maintained by OL
 * @peer:                        Pointer to peer structure maintained by OL
 * @tid:                         TID value passed as part of HTT msg by f/w
 * @rx_mpdu_desc:                Pointer to Rx Descriptor for the given MPDU
 *
 *  This function
 *      1) For Multicast Frames -- does duplicate detection
 *          A frame is considered a duplicate (and dropped) if it has a
 *          sequence number which is received twice in succession, with the
 *          retry bit set in the second case.
 *          A frame which is older than the last sequence number received
 *          is not considered a duplicate but out-of-order. This function
 *          does not perform an out-of-order check for multicast frames,
 *          which is in keeping with section 9.3.2.10 of the 802.11-2012
 *          spec.
 *      2) For Unicast Frames -- does duplicate detection & out-of-order
 *          check, but only for non-aggregation TIDs.
 *
 * Return:        htt_rx_status_err_replay if the packet needs to be
 *                dropped, htt_rx_status_ok otherwise.
 */
enum htt_rx_status
ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    uint8_t tid,
		    void *rx_mpdu_desc)
{
	uint16_t pkt_tid = 0xffff;
	uint16_t seq_num = IEEE80211_SEQ_MAX;
	bool retry = false;

	seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc, false);

	/* For mcast packets, we only do dup-detection, not the re-order check */

	if (qdf_unlikely(OL_RX_MCAST_TID == tid)) {

		pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);

		/* Invalid packet TID, expected only for HL */
		/* Pass the packet on */
		if (qdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
			return htt_rx_status_ok;

		retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);

		/*
		 * At this point, we define frames to be duplicate if they
		 * arrive "ONLY" in succession with the same sequence number
		 * and the last one has the retry bit set. For an older frame,
		 * we consider that as an out-of-order frame, and hence do not
		 * perform the dup-detection or out-of-order check for
		 * multicast frames as per discussions & spec.
		 * Hence a "seq_num <= last_seq_num" check is not necessary.
		 */
		if (qdf_unlikely(retry &&
			(seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {
			/* drop mcast */
			TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
			return htt_rx_status_err_replay;
		}

		/*
		 * This is a multicast packet likely to be passed on...
		 * Set the mcast last seq number here.
		 * This is fairly accurate since:
		 * a) f/w sends multicast as separate PPDU/HTT messages
		 * b) mcast packets are not aggregated & hence single
		 * c) as a result of b), the flush / release bit is always
		 *    set on mcast packets, so they are likely to be
		 *    released immediately
		 */
		peer->tids_mcast_last_seq[pkt_tid] = seq_num;
		return htt_rx_status_ok;
	} else {
		return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
	}
}
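
/*
 * Illustrative trace of the mcast rule above (hypothetical values):
 *   frame A: seq 100, retry 0 -> recorded as tids_mcast_last_seq, passed on
 *   frame B: seq 100, retry 1 -> matches last seq with retry set, dropped
 *   frame C: seq  98, retry 0 -> older, i.e. out-of-order rather than a
 *                                duplicate, passed on (98 becomes last seq)
 */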

void
ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx, qdf_nbuf_t head_msdu,
		    qdf_nbuf_t tail_msdu)
{
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;

	idx &= peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		qdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
	} else {
		rx_reorder_array_elem->head = head_msdu;
		OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
	}
	rx_reorder_array_elem->tail = tail_msdu;
}
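
/*
 * Example (hypothetical numbers): with a 32-entry reorder array
 * (win_sz_mask == 0x1f), an MPDU whose reorder index is 37 is stored in
 * slot 37 & 0x1f == 5.  If slot 5 is already occupied, the new MSDU
 * list is chained onto the existing tail rather than replacing it.
 */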

void
ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
		      struct ol_txrx_peer_t *peer,
		      unsigned int tid, unsigned int idx_start,
		      unsigned int idx_end)
{
	unsigned int idx;
	unsigned int win_sz, win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu;
	qdf_nbuf_t tail_msdu;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* may get reset below */
	peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];

	head_msdu = rx_reorder_array_elem->head;
	tail_msdu = rx_reorder_array_elem->tail;
	rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
	if (head_msdu)
		OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);

	idx = idx_start + 1;
	OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	while (idx != idx_end) {
		rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
						  rx_reorder_array_elem);
			tail_msdu = rx_reorder_array_elem->tail;
		}
		rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
						      NULL;
		idx++;
		OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
	}
	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		/*
		 * This logic is not quite correct - the last_seq value should
		 * be the sequence number of the final MPDU released rather than
		 * the initial MPDU released.
		 * However, tracking the sequence number of the first MPDU in
		 * the released batch works well enough:
		 * For Peregrine and Rome, the last_seq is checked only for
		 * non-aggregate cases, where only one MPDU at a time is
		 * released.
		 * For Riva, Pronto, and Northstar, the last_seq is checked to
		 * filter out late-arriving rx frames, whose sequence number
		 * will be less than the first MPDU in this release batch.
		 */
		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  head_msdu), false);
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
	/*
	 * If the rx reorder timeout is handled by host SW rather than the
	 * target's rx reorder logic, then stop the timer here.
	 * (If there are remaining rx holes, then the timer will be restarted.)
	 */
	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}
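
/*
 * Example (hypothetical): with win_sz_mask == 0x1f, a call with
 * idx_start == 30 and idx_end == 2 releases the contents of slots
 * 30, 31, 0, 1 in order; idx_end is exclusive - it is the first slot
 * NOT released, and it becomes the new tids_next_rel_idx.
 */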

void
ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned int tid,
		    unsigned int idx_start,
		    unsigned int idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_pdev_t *pdev;
	unsigned int win_sz;
	uint8_t win_sz_mask;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;

	pdev = vdev->pdev;
	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	/* an idx_end value of 0xffff means to flush the entire array */
	if (idx_end == 0xffff) {
		idx_end = idx_start;
		/*
		 * The array is being flushed in its entirety because the block
		 * ack window has been shifted to a new position that does not
		 * overlap with the old position.  (Or due to reception of a
		 * DELBA.)
		 * Thus, since the block ack window is essentially being reset,
		 * reset the "next release index".
		 */
		peer->tids_next_rel_idx[tid] =
			OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz, win_sz_mask);
	} else {
		peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;
	}

	idx_start &= win_sz_mask;
	idx_end &= win_sz_mask;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[idx_start];
		idx_start = (idx_start + 1);
		OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);

		if (rx_reorder_array_elem->head) {
			OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
						    1);
			if (!head_msdu) {
				head_msdu = rx_reorder_array_elem->head;
				tail_msdu = rx_reorder_array_elem->tail;
				rx_reorder_array_elem->head = NULL;
				rx_reorder_array_elem->tail = NULL;
				continue;
			}
			qdf_nbuf_set_next(tail_msdu,
					  rx_reorder_array_elem->head);
			tail_msdu = rx_reorder_array_elem->tail;
			rx_reorder_array_elem->head =
				rx_reorder_array_elem->tail = NULL;
		}
	} while (idx_start != idx_end);

	ol_rx_defrag_waitlist_remove(peer, tid);

	if (head_msdu) {
		uint16_t seq_num;
		htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;

		seq_num = htt_rx_mpdu_desc_seq_num(
			htt_pdev,
			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu), false);
		peer->tids_last_seq[tid] = seq_num;
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		if (action == htt_rx_flush_release) {
			peer->rx_opt_proc(vdev, peer, tid, head_msdu);
		} else {
			do {
				qdf_nbuf_t next;

				next = qdf_nbuf_next(head_msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev,
						       head_msdu);
				head_msdu = next;
			} while (head_msdu);
		}
	}
	/*
	 * If the rx reorder array is empty, then reset the last_seq value -
	 * it is likely that a BAR or a sequence number shift caused the
	 * sequence number to jump, so the old last_seq value is not relevant.
	 */
	if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
		peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */

	OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
}
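
/*
 * Example (hypothetical): a flush with idx_end == 0xffff sets
 * idx_end = idx_start, and since the walk above is a do/while loop, it
 * visits every slot exactly once before wrapping back to idx_start -
 * i.e. the whole window is drained, either up the stack
 * (htt_rx_flush_release) or into the free pool (htt_rx_flush_discard).
 */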

void
ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
			 unsigned int tid, unsigned int *idx_end)
{
	unsigned int win_sz, win_sz_mask;
	unsigned int idx_start = 0, tmp_idx = 0;

	win_sz = peer->tids_rx_reorder[tid].win_sz;
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;

	OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
	tmp_idx++;
	OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	/* bypass the initial hole */
	while (tmp_idx != idx_start &&
	       !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/* bypass the present frames following the initial hole */
	while (tmp_idx != idx_start &&
	       peer->tids_rx_reorder[tid].array[tmp_idx].head) {
		tmp_idx++;
		OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
	}
	/*
	 * idx_end is exclusive rather than inclusive.
	 * In other words, it is the index of the first slot of the second
	 * hole, rather than the index of the final present frame following
	 * the first hole.
	 */
	*idx_end = tmp_idx;
}
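
/*
 * Example (hypothetical window, H = hole, P = present):
 *     slot:   0  1  2  3  4  5  6  7
 *     state:  H  H  P  P  H  P  P  P
 * Starting from idx_start = 0, the first loop skips slot 1 (the rest of
 * the initial hole), the second loop skips the present slots 2-3, and
 * *idx_end is returned as 4: the first slot of the second hole.
 */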

#ifdef HL_RX_AGGREGATION_HOLE_DETECTION

/**
 * ol_rx_reorder_detect_hole - detect a hole in the rx reorder window
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: idx_start
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	uint32_t win_sz_mask, next_rel_idx, hole_size;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("Invalid tid: %u", tid);
		return;
	}

	if (peer->tids_next_rel_idx[tid] == INVALID_REORDER_INDEX)
		return;

	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	/* return directly if block ack is not enabled */
	if (win_sz_mask == 0)
		return;

	idx_start &= win_sz_mask;
	next_rel_idx = peer->tids_next_rel_idx[tid] & win_sz_mask;

	if (idx_start != next_rel_idx) {
		hole_size = ((int)idx_start - (int)next_rel_idx) & win_sz_mask;

		ol_rx_aggregation_hole(hole_size);
	}
}
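
/*
 * Example (hypothetical): if the next expected release index is 5 but a
 * release starts at index 9 (win_sz_mask == 0x1f), then
 *     hole_size = (9 - 5) & 0x1f = 4
 * i.e. four MPDUs are missing, which is reported via
 * ol_rx_aggregation_hole().
 */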

#else

/**
 * ol_rx_reorder_detect_hole - detect a hole in the rx reorder window
 * @peer: ol_txrx_peer_t
 * @tid: tid
 * @idx_start: idx_start
 *
 * Return: void
 */
static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
				      uint32_t tid,
				      uint32_t idx_start)
{
	/* no-op */
}

#endif

void
ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
			   struct ol_txrx_peer_t *peer)
{
	int tid;

	for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
		ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
				    htt_rx_flush_discard);
	}
	OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
}

/* functions called by HTT */

void
ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
{
	uint8_t round_pwr2_win_sz;
	unsigned int array_size;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;
	void *array_mem = NULL;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("invalid tid, %u", tid);
		WARN_ON(1);
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		ol_txrx_err("not able to find peer, %u", peer_id);
		return;
	}

	if (pdev->cfg.host_addba) {
		ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
					  &peer->mac_addr.raw[0], tid, failed);
	}
	if (failed)
		return;

	peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;   /* invalid */
	rx_reorder = &peer->tids_rx_reorder[tid];

	TXRX_ASSERT2(win_sz <= 64);
	round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
	array_size =
		round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);

	array_mem = qdf_mem_malloc(array_size);
	if (!array_mem)
		return;

	if (rx_reorder->array != &rx_reorder->base) {
		ol_txrx_info("delete array for tid %d", tid);
		qdf_mem_free(rx_reorder->array);
	}

	rx_reorder->array = array_mem;
	rx_reorder->win_sz = win_sz;
	TXRX_ASSERT1(rx_reorder->array);

	rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
	rx_reorder->num_mpdus = 0;

	peer->tids_next_rel_idx[tid] =
		OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
				       rx_reorder->win_sz_mask);
}
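
/*
 * Example (hypothetical): an ADDBA negotiating win_sz = 26 allocates
 * OL_RX_REORDER_ROUND_PWR2(26) == 32 array elements and sets
 * win_sz_mask = 0x1f, while win_sz itself stays 26; the power-of-2
 * rounding exists only so index wraparound can be done with a mask.
 */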

void
ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
{
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_t *rx_reorder;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("invalid tid, %u", tid);
		WARN_ON(1);
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		ol_txrx_err("not able to find peer, %u", peer_id);
		return;
	}

	peer->tids_next_rel_idx[tid] = INVALID_REORDER_INDEX;
	rx_reorder = &peer->tids_rx_reorder[tid];

	/* check that there really was a block ack agreement */
	TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
	/*
	 * Deallocate the old rx reorder array.
	 * The call to ol_rx_reorder_init below
	 * will reset rx_reorder->array to point to
	 * the single-element statically-allocated reorder array
	 * used for non block-ack cases.
	 */
	if (rx_reorder->array != &rx_reorder->base) {
		ol_txrx_dbg("delete reorder array, tid:%d", tid);
		qdf_mem_free(rx_reorder->array);
	}

	/* set up the TID with default parameters (ARQ window size = 1) */
	ol_rx_reorder_init(rx_reorder, tid);
}

void
ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
		    uint16_t peer_id,
		    uint8_t tid,
		    uint16_t idx_start,
		    uint16_t idx_end, enum htt_rx_flush_action action)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	int idx;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("Invalid tid: %u", tid);
		return;
	}

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer)
		vdev = peer->vdev;
	else
		return;

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
	if (rx_reorder_array_elem->head) {
		rx_desc =
			htt_rx_msdu_desc_retrieve(htt_pdev,
						  rx_reorder_array_elem->head);
		if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
			ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
						 idx_start);
			/*
			 * Assuming a flush message is sent separately for
			 * frags and for normal frames
			 */
			OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
			return;
		}
	}

	if (action == htt_rx_flush_release)
		ol_rx_reorder_detect_hole(peer, tid, idx_start);

	ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
	/*
	 * If the rx reorder timeout is handled by host SW, see if there are
	 * remaining rx holes that require the timer to be restarted.
	 */
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
}

void
ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
		     uint16_t peer_id,
		     uint8_t tid,
		     uint16_t seq_num_start,
		     uint16_t seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	void *rx_desc;
	struct ol_txrx_peer_t *peer;
	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
	unsigned int win_sz_mask;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	uint16_t seq_num;
	int i = 0;

	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
		ol_txrx_err("Invalid tid: %u", tid);
		WARN_ON(1);
		return;
	}
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);

	if (!peer) {
		/*
		 * If we can't find a peer, send this packet to the OCB
		 * interface using the OCB self peer
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer)
		vdev = peer->vdev;
	else
		return;

	qdf_atomic_set(&peer->fw_pn_check, 1);
	/* TODO: handle the fragmentation case */
	win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
	seq_num_start &= win_sz_mask;
	seq_num_end &= win_sz_mask;
	seq_num = seq_num_start;

	do {
		rx_reorder_array_elem =
			&peer->tids_rx_reorder[tid].array[seq_num];

		if (rx_reorder_array_elem->head) {
			if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
				qdf_nbuf_t msdu, next_msdu, mpdu_head,
					   mpdu_tail;
				/*
				 * static variables are zero-initialized by
				 * the C runtime, so no explicit init needed
				 */
				static uint32_t last_pncheck_print_time;
				uint32_t current_time_ms;
				union htt_rx_pn_t pn = { 0 };
				int index, pn_len;

				mpdu_head = msdu = rx_reorder_array_elem->head;
				mpdu_tail = rx_reorder_array_elem->tail;

				pn_ie_cnt--;
				i++;
				rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
								    msdu);
				index = htt_rx_msdu_is_wlan_mcast(
					pdev->htt_pdev, rx_desc)
					? txrx_sec_mcast
					: txrx_sec_ucast;
				pn_len = pdev->rx_pn[peer->security[index].
						     sec_type].len;
				htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
						    pn_len);

				current_time_ms = qdf_system_ticks_to_msecs(
					qdf_system_ticks());
				if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
				    (current_time_ms -
				     last_pncheck_print_time)) {
					last_pncheck_print_time =
						current_time_ms;
					ol_txrx_warn(
					   "Tgt PN check failed - TID %d, peer %pK "
					   "("QDF_MAC_ADDR_FMT")\n"
					   "    PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
					   "    new seq num = %d\n",
					   tid, peer,
					   QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					   pn.pn128[1],
					   pn.pn128[0],
					   pn.pn128[0] & 0xffffffffffffULL,
					   htt_rx_mpdu_desc_seq_num(htt_pdev,
								    rx_desc,
								    false));
				} else {
					ol_txrx_dbg(
					   "Tgt PN check failed - TID %d, peer %pK "
					   "("QDF_MAC_ADDR_FMT")\n"
					   "    PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
					   "    new seq num = %d\n",
					   tid, peer,
					   QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					   pn.pn128[1],
					   pn.pn128[0],
					   pn.pn128[0] & 0xffffffffffffULL,
					   htt_rx_mpdu_desc_seq_num(htt_pdev,
								    rx_desc,
								    false));
				}
				ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
					  peer->mac_addr.raw, tid,
					  htt_rx_mpdu_desc_tsf32(htt_pdev,
								 rx_desc),
					  OL_RX_ERR_PN, mpdu_head, NULL, 0);

				/* free all MSDUs within this MPDU */
				do {
					next_msdu = qdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == mpdu_tail)
						break;
					msdu = next_msdu;
				} while (1);

			} else {
				if (!head_msdu) {
					head_msdu = rx_reorder_array_elem->head;
					tail_msdu = rx_reorder_array_elem->tail;
				} else {
					qdf_nbuf_set_next(
						tail_msdu,
						rx_reorder_array_elem->head);
					tail_msdu = rx_reorder_array_elem->tail;
				}
			}
			rx_reorder_array_elem->head = NULL;
			rx_reorder_array_elem->tail = NULL;
		}
		seq_num = (seq_num + 1) & win_sz_mask;
	} while (seq_num != seq_num_end);

	if (head_msdu) {
		/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
		qdf_nbuf_set_next(tail_msdu, NULL);
		peer->rx_opt_proc(vdev, peer, tid, head_msdu);
	}
}
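
/*
 * Example (hypothetical): for seq_num_start = 0, seq_num_end = 4,
 * pn_ie_cnt = 1 and pn_ie[] = { 2 }, the walk above releases the MPDUs
 * in slots 0, 1 and 3 as one chained list, while the MPDU in slot 2 -
 * flagged by the target as having failed the PN check - is freed and
 * reported through ol_rx_err().
 */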

#if defined(ENABLE_RX_REORDER_TRACE)

A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
	int num_elems;

	num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
	pdev->rx_reorder_trace.idx = 0;
	pdev->rx_reorder_trace.cnt = 0;
	pdev->rx_reorder_trace.mask = num_elems - 1;
	pdev->rx_reorder_trace.data = qdf_mem_malloc(
		sizeof(*pdev->rx_reorder_trace.data) * num_elems);
	if (!pdev->rx_reorder_trace.data)
		return A_NO_MEMORY;

	while (--num_elems >= 0)
		pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;

	return A_OK;
}

void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
	qdf_mem_free(pdev->rx_reorder_trace.data);
}

void
ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
			uint8_t tid,
			uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
{
	uint32_t idx = pdev->rx_reorder_trace.idx;

	pdev->rx_reorder_trace.data[idx].tid = tid;
	pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
	pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
	pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
	pdev->rx_reorder_trace.cnt++;
	idx++;
	pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
}

void
ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
{
	static int print_count;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_reorder_trace.idx;
	if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_reorder_trace.cnt -
			(pdev->rx_reorder_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;

		delta = elems - limit;
		start += delta;
		start &= pdev->rx_reorder_trace.mask;
		cnt += delta;
	}

	i = start;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "           log       array seq");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "   count   idx  tid   idx  num (LSBs)");
	do {
		uint16_t seq_num, reorder_idx;

		seq_num = pdev->rx_reorder_trace.data[i].seq_num;
		reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
		if (seq_num < (1 << 14)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d  %3d  %4d  %4d (%d)",
				  cnt, i, pdev->rx_reorder_trace.data[i].tid,
				  reorder_idx, seq_num, seq_num & 63);
		} else {
			int err = TXRX_SEQ_NUM_ERR(seq_num);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "  %6lld  %4d err %d (%d MPDUs)",
				  cnt, i, err,
				  pdev->rx_reorder_trace.data[i].num_mpdus);
		}
		cnt++;
		i++;
		i &= pdev->rx_reorder_trace.mask;
	} while (i != end);
}

#endif /* ENABLE_RX_REORDER_TRACE */
933