xref: /wlan-driver/qca-wifi-host-cmn/umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /**
19  *  DOC: wlan_mgmt_txrx_rx_reo.c
20  *  This file contains mgmt rx re-ordering related function definitions
21  */
22 
23 #include "wlan_mgmt_txrx_rx_reo_i.h"
24 #include <wlan_mgmt_txrx_rx_reo_tgt_api.h>
25 #include "wlan_mgmt_txrx_main_i.h"
26 #include <qdf_util.h>
27 #include <wlan_mlo_mgr_cmn.h>
28 #include <wlan_mlo_mgr_setup.h>
29 #include <qdf_platform.h>
30 #include <qdf_types.h>
31 
/* One management Rx reorder context per MLO group */
static struct mgmt_rx_reo_context *g_rx_reo_ctx[WLAN_MAX_MLO_GROUPS];

/* Accessors for the per-group reorder context */
#define mgmt_rx_reo_get_context(_grp_id) (g_rx_reo_ctx[_grp_id])
#define mgmt_rx_reo_set_context(grp_id, c)       (g_rx_reo_ctx[grp_id] = c)

/*
 * Half and full range of the 16-bit management packet counter; used by the
 * wrap-around safe compare/subtract helpers below.
 */
#define MGMT_RX_REO_PKT_CTR_HALF_RANGE (0x8000)
#define MGMT_RX_REO_PKT_CTR_FULL_RANGE (MGMT_RX_REO_PKT_CTR_HALF_RANGE << 1)
39 
40 /**
41  * wlan_mgmt_rx_reo_get_ctx_from_pdev - Get MGMT Rx REO Context from pdev
42  * @pdev: Pointer to pdev structure object
43  *
44  * API to get the MGMT RX reo context of the pdev using the appropriate
45  * MLO group id.
46  *
47  * Return: Mgmt rx reo context for the pdev
48  */
49 
50 static inline struct mgmt_rx_reo_context*
wlan_mgmt_rx_reo_get_ctx_from_pdev(struct wlan_objmgr_pdev * pdev)51 wlan_mgmt_rx_reo_get_ctx_from_pdev(struct wlan_objmgr_pdev *pdev)
52 {
53 	uint8_t ml_grp_id;
54 
55 	ml_grp_id = wlan_get_mlo_grp_id_from_pdev(pdev);
56 	if (ml_grp_id >= WLAN_MAX_MLO_GROUPS) {
57 		mgmt_rx_reo_err("REO context - Invalid ML Group ID");
58 		return NULL;
59 	}
60 
61 	return mgmt_rx_reo_get_context(ml_grp_id);
62 }
63 
64 /**
65  * mgmt_rx_reo_compare_pkt_ctrs_gte() - Compare given mgmt packet counters
66  * @ctr1: Management packet counter1
67  * @ctr2: Management packet counter2
68  *
69  * We can't directly use the comparison operator here because the counters can
70  * overflow. But these counters have a property that the difference between
71  * them can never be greater than half the range of the data type.
72  * We can make use of this condition to detect which one is actually greater.
73  *
74  * Return: true if @ctr1 is greater than or equal to @ctr2, else false
75  */
76 static inline bool
mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1,uint16_t ctr2)77 mgmt_rx_reo_compare_pkt_ctrs_gte(uint16_t ctr1, uint16_t ctr2)
78 {
79 	uint16_t delta = ctr1 - ctr2;
80 
81 	return delta <= MGMT_RX_REO_PKT_CTR_HALF_RANGE;
82 }
83 
84 /**
85  * mgmt_rx_reo_subtract_pkt_ctrs() - Subtract given mgmt packet counters
86  * @ctr1: Management packet counter1
87  * @ctr2: Management packet counter2
88  *
89  * We can't directly use the subtract operator here because the counters can
90  * overflow. But these counters have a property that the difference between
91  * them can never be greater than half the range of the data type.
92  * We can make use of this condition to detect whichone is actually greater and
93  * return the difference accordingly.
94  *
95  * Return: Difference between @ctr1 and @crt2
96  */
97 static inline int
mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1,uint16_t ctr2)98 mgmt_rx_reo_subtract_pkt_ctrs(uint16_t ctr1, uint16_t ctr2)
99 {
100 	uint16_t delta = ctr1 - ctr2;
101 
102 	/**
103 	 * if delta is greater than half the range (i.e, ctr1 is actually
104 	 * smaller than ctr2), then the result should be a negative number.
105 	 * subtracting the entire range should give the correct value.
106 	 */
107 	if (delta > MGMT_RX_REO_PKT_CTR_HALF_RANGE)
108 		return delta - MGMT_RX_REO_PKT_CTR_FULL_RANGE;
109 
110 	return delta;
111 }
112 
#define MGMT_RX_REO_GLOBAL_TS_HALF_RANGE (0x80000000)
/**
 * mgmt_rx_reo_compare_global_timestamps_gte() - Compare given global
 * timestamps
 * @ts1: Global timestamp1
 * @ts2: Global timestamp2
 *
 * A plain relational compare is wrong here because the timestamps wrap.
 * The timestamps are guaranteed to never drift apart by more than half the
 * range of uint32_t, so the wrapped difference tells us the true ordering.
 *
 * Return: true if @ts1 is greater than or equal to @ts2, else false
 */
static inline bool
mgmt_rx_reo_compare_global_timestamps_gte(uint32_t ts1, uint32_t ts2)
{
	/* Wrapped (modulo 2^32) distance from @ts2 to @ts1 */
	uint32_t elapsed = ts1 - ts2;

	return elapsed <= MGMT_RX_REO_GLOBAL_TS_HALF_RANGE;
}
133 
134 #ifdef WLAN_MGMT_RX_REO_ERROR_HANDLING
135 /**
136  * handle_snapshot_sanity_failures() - Handle snapshot sanity failure
137  * @desc: Pointer to frame descriptor
138  * @link: Link ID
139  *
140  * API to handle snapshot sanity failure. Host drops management frames which
141  * results in snapshot sanity failure.
142  *
143  * Return: QDF_STATUS
144  */
145 static QDF_STATUS
handle_snapshot_sanity_failures(struct mgmt_rx_reo_frame_descriptor * desc,uint8_t link)146 handle_snapshot_sanity_failures(struct mgmt_rx_reo_frame_descriptor *desc,
147 				uint8_t link)
148 {
149 	if (!desc) {
150 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
151 		return QDF_STATUS_E_NULL_VALUE;
152 	}
153 
154 	mgmt_rx_reo_debug_rl("Snapshot sanity check for link %u failed", link);
155 
156 	desc->drop = true;
157 	desc->drop_reason = MGMT_RX_REO_SNAPSHOT_SANITY_FAILURE;
158 
159 	return QDF_STATUS_SUCCESS;
160 }
161 
162 /**
163  * handle_out_of_order_pkt_ctr() - Handle management frames with out of order
164  * packet counter values
165  * @desc: Pointer to frame descriptor
166  * @host_ss: Pointer to host snapshot
167  *
168  * API to handle management frames with out of order packet counter values.
169  * This API implements the design choice to drop management frames with packet
170  * counter value less than than or equal to the last management frame received
171  * in the same link.
172  *
173  * Return: QDF_STATUS
174  */
175 static QDF_STATUS
handle_out_of_order_pkt_ctr(struct mgmt_rx_reo_frame_descriptor * desc,struct mgmt_rx_reo_snapshot_params * host_ss)176 handle_out_of_order_pkt_ctr(struct mgmt_rx_reo_frame_descriptor *desc,
177 			    struct mgmt_rx_reo_snapshot_params *host_ss)
178 {
179 	if (!desc) {
180 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
181 		return QDF_STATUS_E_NULL_VALUE;
182 	}
183 
184 	if (!host_ss) {
185 		mgmt_rx_reo_err("Mgmt Rx REO host snapshot is null");
186 		return QDF_STATUS_E_NULL_VALUE;
187 	}
188 
189 	mgmt_rx_reo_debug_rl("Cur frame ctr <= last frame ctr for link = %u",
190 			     mgmt_rx_reo_get_link_id(desc->rx_params));
191 
192 	desc->drop = true;
193 	if (mgmt_rx_reo_get_pkt_counter(desc->rx_params) ==
194 	    host_ss->mgmt_pkt_ctr)
195 		desc->drop_reason = MGMT_RX_REO_DUPLICATE_PKT_CTR;
196 	else
197 		desc->drop_reason = MGMT_RX_REO_OUT_OF_ORDER_PKT_CTR;
198 
199 	return QDF_STATUS_SUCCESS;
200 }
201 
202 /**
203  * check_and_handle_zero_frame_duration() - Check and handle zero duration error
204  * @pdev: Pointer to pdev object
205  * @desc: Pointer to frame descriptor
206  *
207  * API to check for zero duration management frames. Host will be able to
208  * reorder such frames with the limitation that parallel rx detection may fail.
209  * Hence don't drop management frames with zero duration.
210  *
211  * Return: QDF_STATUS
212  */
213 static QDF_STATUS
check_and_handle_zero_frame_duration(struct wlan_objmgr_pdev * pdev,struct mgmt_rx_reo_frame_descriptor * desc)214 check_and_handle_zero_frame_duration(struct wlan_objmgr_pdev *pdev,
215 				     struct mgmt_rx_reo_frame_descriptor *desc)
216 {
217 	struct mgmt_rx_reo_params *reo_params;
218 
219 	if (!desc) {
220 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
221 		return QDF_STATUS_E_NULL_VALUE;
222 	}
223 
224 	if (!desc->rx_params) {
225 		mgmt_rx_reo_err("Mgmt Rx params is null");
226 		return QDF_STATUS_E_NULL_VALUE;
227 	}
228 
229 	reo_params = desc->rx_params->reo_params;
230 	if (!reo_params) {
231 		mgmt_rx_reo_err("Mgmt Rx REO params is NULL");
232 		return QDF_STATUS_E_NULL_VALUE;
233 	}
234 
235 	if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
236 	    !mgmt_rx_reo_get_duration_us(desc->rx_params)) {
237 		mgmt_rx_reo_debug_rl("0 dur: link= %u,valid= %u,ctr= %u,ts= %u",
238 				     reo_params->link_id, reo_params->valid,
239 				     reo_params->mgmt_pkt_ctr,
240 				     reo_params->global_timestamp);
241 	}
242 
243 	return QDF_STATUS_SUCCESS;
244 }
245 
246 /**
247  * check_and_handle_invalid_reo_params() - Check and handle invalid reo
248  * parameters error
249  * @desc: Pointer to frame descriptor
250  *
251  * API to check for invalid reo parameter error. Host won't be able to reorder
252  * this frame and hence drop this frame.
253  *
254  * Return: QDF_STATUS
255  */
256 static QDF_STATUS
check_and_handle_invalid_reo_params(struct mgmt_rx_reo_frame_descriptor * desc)257 check_and_handle_invalid_reo_params(struct mgmt_rx_reo_frame_descriptor *desc)
258 {
259 	struct mgmt_rx_reo_params *reo_params;
260 
261 	if (!desc) {
262 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
263 		return QDF_STATUS_E_NULL_VALUE;
264 	}
265 
266 	if (!desc->rx_params) {
267 		mgmt_rx_reo_err("Mgmt Rx params is null");
268 		return QDF_STATUS_E_NULL_VALUE;
269 	}
270 
271 	reo_params = desc->rx_params->reo_params;
272 	if (!reo_params) {
273 		mgmt_rx_reo_err("Mgmt Rx REO params is NULL");
274 		return QDF_STATUS_E_NULL_VALUE;
275 	}
276 
277 	if (!reo_params->valid) {
278 		mgmt_rx_reo_debug_rl("Invalid param: link= %u, ctr= %u, ts= %u",
279 				     reo_params->link_id,
280 				     reo_params->mgmt_pkt_ctr,
281 				     reo_params->global_timestamp);
282 		desc->drop = true;
283 		desc->drop_reason = MGMT_RX_REO_INVALID_REO_PARAMS;
284 
285 		return QDF_STATUS_E_FAILURE;
286 	}
287 
288 	return QDF_STATUS_SUCCESS;
289 }
290 #else
291 /**
292  * handle_snapshot_sanity_failures() - Handle snapshot sanity failure
293  * @desc: Pointer to frame descriptor
294  * @link: Link ID
295  *
296  * API to handle snapshot sanity failure. Host drops management frames which
297  * results in snapshot sanity failure.
298  *
299  * Return: QDF_STATUS
300  */
301 static QDF_STATUS
handle_snapshot_sanity_failures(struct mgmt_rx_reo_frame_descriptor * desc,uint8_t link)302 handle_snapshot_sanity_failures(struct mgmt_rx_reo_frame_descriptor *desc,
303 				uint8_t link)
304 {
305 	if (!desc) {
306 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
307 		return QDF_STATUS_E_NULL_VALUE;
308 	}
309 
310 	mgmt_rx_reo_err_rl("Snapshot sanity check for link %u failed", link);
311 
312 	desc->drop = true;
313 	desc->drop_reason = MGMT_RX_REO_SNAPSHOT_SANITY_FAILURE;
314 
315 	return QDF_STATUS_SUCCESS;
316 }
317 
318 /**
319  * handle_out_of_order_pkt_ctr() - Handle management frames with out of order
320  * packet counter values
321  * @desc: Pointer to frame descriptor
322  * @host_ss: Pointer to host snapshot
323  *
324  * API to handle management frames with out of order packet counter values.
325  * This API implements the design choice to assert on reception of management
326  * frames with packet counter value less than than or equal to the last
327  * management frame received in the same link.
328  *
329  * Return: QDF_STATUS
330  */
331 static QDF_STATUS
handle_out_of_order_pkt_ctr(struct mgmt_rx_reo_frame_descriptor * desc,struct mgmt_rx_reo_snapshot_params * host_ss)332 handle_out_of_order_pkt_ctr(struct mgmt_rx_reo_frame_descriptor *desc,
333 			    struct mgmt_rx_reo_snapshot_params *host_ss)
334 {
335 	if (!desc) {
336 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
337 		return QDF_STATUS_E_NULL_VALUE;
338 	}
339 
340 	mgmt_rx_reo_err_rl("Cur frame ctr <= last frame ctr for link = %u",
341 			   mgmt_rx_reo_get_link_id(desc->rx_params));
342 
343 	return QDF_STATUS_E_FAILURE;
344 }
345 
346 /**
347  * check_and_handle_zero_frame_duration() - Check and handle zero duration error
348  * @pdev: Pointer to pdev object
349  * @desc: Pointer to frame descriptor
350  *
351  * API to check for zero duration management frames and assert.
352  *
353  * Return: QDF_STATUS
354  */
355 static QDF_STATUS
check_and_handle_zero_frame_duration(struct wlan_objmgr_pdev * pdev,struct mgmt_rx_reo_frame_descriptor * desc)356 check_and_handle_zero_frame_duration(struct wlan_objmgr_pdev *pdev,
357 				     struct mgmt_rx_reo_frame_descriptor *desc)
358 {
359 	struct mgmt_rx_reo_params *reo_params;
360 
361 	if (!desc) {
362 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
363 		return QDF_STATUS_E_NULL_VALUE;
364 	}
365 
366 	if (!desc->rx_params) {
367 		mgmt_rx_reo_err("Mgmt Rx params is null");
368 		return QDF_STATUS_E_NULL_VALUE;
369 	}
370 
371 	reo_params = desc->rx_params->reo_params;
372 	if (!reo_params) {
373 		mgmt_rx_reo_err("Mgmt Rx REO params is NULL");
374 		return QDF_STATUS_E_NULL_VALUE;
375 	}
376 
377 	if (desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
378 	    !mgmt_rx_reo_get_duration_us(desc->rx_params)) {
379 		mgmt_rx_reo_err_rl("0 dur: link= %u,valid= %u,ctr= %u,ts= %u",
380 				   reo_params->link_id, reo_params->valid,
381 				   reo_params->mgmt_pkt_ctr,
382 				   reo_params->global_timestamp);
383 		mgmt_rx_reo_err("Triggering self recovery, zero duration pkt");
384 		qdf_trigger_self_recovery(wlan_pdev_get_psoc(pdev),
385 					  QDF_MGMT_RX_REO_ZERO_DURATION_PKT);
386 
387 		return QDF_STATUS_E_FAILURE;
388 	}
389 
390 	return QDF_STATUS_SUCCESS;
391 }
392 
393 /**
394  * check_and_handle_invalid_reo_params() - Check and handle invalid reo
395  * parameters error
396  * @desc: Pointer to frame descriptor
397  *
398  * API to check for invalid reo parameter error. Host won't be able to reorder
399  * this frame and hence drop this frame.
400  *
401  * Return: QDF_STATUS
402  */
403 static QDF_STATUS
check_and_handle_invalid_reo_params(struct mgmt_rx_reo_frame_descriptor * desc)404 check_and_handle_invalid_reo_params(struct mgmt_rx_reo_frame_descriptor *desc)
405 {
406 	struct mgmt_rx_reo_params *reo_params;
407 
408 	if (!desc) {
409 		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor is null");
410 		return QDF_STATUS_E_NULL_VALUE;
411 	}
412 
413 	if (!desc->rx_params) {
414 		mgmt_rx_reo_err("Mgmt Rx params is null");
415 		return QDF_STATUS_E_NULL_VALUE;
416 	}
417 
418 	reo_params = desc->rx_params->reo_params;
419 	if (!reo_params) {
420 		mgmt_rx_reo_err("Mgmt Rx REO params is NULL");
421 		return QDF_STATUS_E_NULL_VALUE;
422 	}
423 
424 	if (!reo_params->valid) {
425 		mgmt_rx_reo_err_rl("Invalid params: link= %u, ctr= %u, ts= %u",
426 				   reo_params->link_id,
427 				   reo_params->mgmt_pkt_ctr,
428 				   reo_params->global_timestamp);
429 		desc->drop = true;
430 		desc->drop_reason = MGMT_RX_REO_INVALID_REO_PARAMS;
431 
432 		return QDF_STATUS_E_FAILURE;
433 	}
434 
435 	return QDF_STATUS_SUCCESS;
436 }
437 #endif /* WLAN_MGMT_RX_REO_ERROR_HANDLING */
438 
439 /**
440  * mgmt_rx_reo_is_stale_frame()- API to check whether the given management frame
441  * is stale
442  * @last_delivered_frame: pointer to the info of the last frame delivered to
443  * upper layer
444  * @frame_desc: pointer to frame descriptor
445  *
446  * This API checks whether the current management frame under processing is
447  * stale. Any frame older than the last frame delivered to upper layer is a
448  * stale frame. This could happen when we have to deliver frames out of order
449  * due to time out or list size limit. The frames which arrive late at host and
450  * with time stamp lesser than the last delivered frame are stale frames and
451  * they need to be handled differently.
452  *
453  * Return: QDF_STATUS. On success "is_stale" and "is_parallel_rx" members of
454  * @frame_desc will be filled with proper values.
455  */
456 static QDF_STATUS
mgmt_rx_reo_is_stale_frame(struct mgmt_rx_reo_frame_info * last_delivered_frame,struct mgmt_rx_reo_frame_descriptor * frame_desc)457 mgmt_rx_reo_is_stale_frame(
458 		struct mgmt_rx_reo_frame_info *last_delivered_frame,
459 		struct mgmt_rx_reo_frame_descriptor *frame_desc)
460 {
461 	uint32_t cur_frame_start_ts;
462 	uint32_t cur_frame_end_ts;
463 	uint32_t last_delivered_frame_start_ts;
464 	uint32_t last_delivered_frame_end_ts;
465 
466 	if (!last_delivered_frame) {
467 		mgmt_rx_reo_err("Last delivered frame info is null");
468 		return QDF_STATUS_E_NULL_VALUE;
469 	}
470 
471 	if (!frame_desc) {
472 		mgmt_rx_reo_err("Frame descriptor is null");
473 		return QDF_STATUS_E_NULL_VALUE;
474 	}
475 
476 	frame_desc->is_stale = false;
477 	frame_desc->is_parallel_rx = false;
478 	frame_desc->last_delivered_frame = *last_delivered_frame;
479 
480 	if (!frame_desc->reo_required)
481 		return QDF_STATUS_SUCCESS;
482 
483 	if (!last_delivered_frame->valid)
484 		return QDF_STATUS_SUCCESS;
485 
486 	cur_frame_start_ts = mgmt_rx_reo_get_start_ts(frame_desc->rx_params);
487 	cur_frame_end_ts = mgmt_rx_reo_get_end_ts(frame_desc->rx_params);
488 	last_delivered_frame_start_ts =
489 			last_delivered_frame->reo_params.start_timestamp;
490 	last_delivered_frame_end_ts =
491 			last_delivered_frame->reo_params.end_timestamp;
492 
493 	frame_desc->is_stale =
494 		!mgmt_rx_reo_compare_global_timestamps_gte(cur_frame_start_ts,
495 					last_delivered_frame_start_ts);
496 
497 	if (mgmt_rx_reo_compare_global_timestamps_gte
498 		(last_delivered_frame_start_ts, cur_frame_start_ts) &&
499 	    mgmt_rx_reo_compare_global_timestamps_gte
500 		(cur_frame_end_ts, last_delivered_frame_end_ts)) {
501 		frame_desc->is_parallel_rx = true;
502 		frame_desc->is_stale = false;
503 	}
504 
505 	return QDF_STATUS_SUCCESS;
506 }
507 
508 QDF_STATUS
mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc * psoc)509 mgmt_rx_reo_validate_mlo_link_info(struct wlan_objmgr_psoc *psoc)
510 {
511 	uint16_t valid_link_bitmap_shmem;
512 	uint16_t valid_link_bitmap;
513 	int8_t num_active_links_shmem;
514 	int8_t num_active_links;
515 	uint8_t grp_id = 0;
516 	QDF_STATUS status;
517 
518 	if (!psoc) {
519 		mgmt_rx_reo_err("psoc is null");
520 		return QDF_STATUS_E_NULL_VALUE;
521 	}
522 
523 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
524 		return QDF_STATUS_SUCCESS;
525 
526 	status = tgt_mgmt_rx_reo_get_num_active_hw_links(psoc,
527 							 &num_active_links_shmem);
528 	if (QDF_IS_STATUS_ERROR(status)) {
529 		mgmt_rx_reo_err("Failed to get number of active MLO HW links");
530 		return QDF_STATUS_E_FAILURE;
531 	}
532 
533 	if (num_active_links_shmem <= 0) {
534 		mgmt_rx_reo_err("Invalid number of active links from shmem %d",
535 				num_active_links_shmem);
536 		return QDF_STATUS_E_INVAL;
537 	}
538 
539 	if (!mlo_psoc_get_grp_id(psoc, &grp_id)) {
540 		mgmt_rx_reo_err("Failed to get valid MLO Group id");
541 		return QDF_STATUS_E_INVAL;
542 	}
543 
544 	num_active_links = wlan_mlo_get_num_active_links(grp_id);
545 	if (num_active_links <= 0) {
546 		mgmt_rx_reo_err("Invalid number of active links %d",
547 				num_active_links);
548 		return QDF_STATUS_E_INVAL;
549 	}
550 
551 	if (num_active_links_shmem != num_active_links) {
552 		mgmt_rx_reo_err("Mismatch in active links %d and %d",
553 				num_active_links_shmem, num_active_links);
554 		return QDF_STATUS_E_INVAL;
555 	}
556 
557 	status = tgt_mgmt_rx_reo_get_valid_hw_link_bitmap(psoc,
558 							  &valid_link_bitmap_shmem);
559 	if (QDF_IS_STATUS_ERROR(status)) {
560 		mgmt_rx_reo_err("Failed to get valid MLO HW link bitmap");
561 		return QDF_STATUS_E_INVAL;
562 	}
563 
564 	if (!valid_link_bitmap_shmem) {
565 		mgmt_rx_reo_err("Valid link bitmap from shmem is 0");
566 		return QDF_STATUS_E_INVAL;
567 	}
568 
569 	valid_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
570 	if (!valid_link_bitmap) {
571 		mgmt_rx_reo_err("Valid link bitmap is 0");
572 		return QDF_STATUS_E_INVAL;
573 	}
574 
575 	if (valid_link_bitmap_shmem != valid_link_bitmap) {
576 		mgmt_rx_reo_err("Mismatch in valid link bit map 0x%x and 0x%x",
577 				valid_link_bitmap_shmem, valid_link_bitmap);
578 		return QDF_STATUS_E_INVAL;
579 	}
580 
581 	return QDF_STATUS_SUCCESS;
582 }
583 
584 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
585 /**
586  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
587  * @link_id: Link id to be checked
588  * @grp_id: MLO Group id which it belongs to
589  *
590  * Return: true if @link_id is a valid link else false
591  */
592 static bool
mgmt_rx_reo_is_valid_link(uint8_t link_id,uint8_t grp_id)593 mgmt_rx_reo_is_valid_link(uint8_t link_id, uint8_t grp_id)
594 {
595 	uint16_t valid_hw_link_bitmap;
596 
597 	if (link_id >= MAX_MLO_LINKS) {
598 		mgmt_rx_reo_err("Invalid link id %u", link_id);
599 		return false;
600 	}
601 
602 	valid_hw_link_bitmap = wlan_mlo_get_valid_link_bitmap(grp_id);
603 	if (!valid_hw_link_bitmap) {
604 		mgmt_rx_reo_err("Valid HW link bitmap is zero");
605 		return false;
606 	}
607 
608 	return (valid_hw_link_bitmap & (1 << link_id));
609 }
610 
611 /**
612  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO HW links active in the
613  * system
614  * @reo_context: Pointer to reo context object
615  * @grp_id: MLO group id which it belongs to
616  *
617  * Return: On success returns number of active MLO HW links. On failure
618  * returns WLAN_MLO_INVALID_NUM_LINKS.
619  */
620 static int8_t
mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context * reo_context,uint8_t grp_id)621 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
622 			      uint8_t grp_id)
623 {
624 	if (!reo_context) {
625 		mgmt_rx_reo_err("Mgmt reo context is null");
626 		return WLAN_MLO_INVALID_NUM_LINKS;
627 	}
628 
629 	return wlan_mlo_get_num_active_links(grp_id);
630 }
631 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery() - Handle premature
 * delivery (no-op)
 * @reo_context: Pointer to reo context object
 * @global_timestamp: Global time stamp of the current management frame
 *
 * No-op in the non-simulation build; no extra bookkeeping is needed when a
 * frame is delivered before its wait count reaches zero.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	return QDF_STATUS_SUCCESS;
}
639 
/**
 * mgmt_rx_reo_handle_stale_frame() - Handle stale management frame (no-op)
 * @reo_list: Pointer to reorder list
 * @desc: Pointer to frame descriptor
 *
 * No-op in the non-simulation build; stale frames need no extra handling.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
			       struct mgmt_rx_reo_frame_descriptor *desc)
{
	return QDF_STATUS_SUCCESS;
}
646 #else
647 /**
648  * mgmt_rx_reo_sim_is_valid_link() - Check whether the given HW link is valid
649  * @sim_context: Pointer to reo simulation context object
650  * @link_id: Link id to be checked
651  *
652  * Return: true if @link_id is a valid link, else false
653  */
654 static bool
mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context * sim_context,uint8_t link_id)655 mgmt_rx_reo_sim_is_valid_link(struct mgmt_rx_reo_sim_context *sim_context,
656 			      uint8_t link_id)
657 {
658 	bool is_valid_link = false;
659 
660 	if (!sim_context) {
661 		mgmt_rx_reo_err("Mgmt reo sim context is null");
662 		return false;
663 	}
664 
665 	if (link_id >= MAX_MLO_LINKS) {
666 		mgmt_rx_reo_err("Invalid link id %u", link_id);
667 		return false;
668 	}
669 
670 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
671 
672 	if (sim_context->link_id_to_pdev_map.map[link_id])
673 		is_valid_link = true;
674 
675 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
676 
677 	return is_valid_link;
678 }
679 
680 /**
681  * mgmt_rx_reo_is_valid_link() - Check whether the given HW link is valid
682  * @ml_grp_id: MLO Group id on which the Link ID  belongs to
683  * @link_id: HW Link ID to be verified
684  *
685  * Return: true if @link_id is a valid link else false
686  */
687 static bool
mgmt_rx_reo_is_valid_link(uint8_t ml_grp_id,uint8_t link_id)688 mgmt_rx_reo_is_valid_link(uint8_t ml_grp_id, uint8_t link_id)
689 {
690 	struct mgmt_rx_reo_context *reo_context;
691 
692 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
693 
694 	if (!reo_context) {
695 		mgmt_rx_reo_err("Mgmt reo context is null");
696 		return false;
697 	}
698 
699 	return mgmt_rx_reo_sim_is_valid_link(&reo_context->sim_context,
700 					     link_id);
701 }
702 
703 /**
704  * mgmt_rx_reo_sim_get_num_mlo_links() - Get number of MLO HW links from the reo
705  * simulation context object
706  * @sim_context: Pointer to reo simulation context object
707  *
708  * Number of MLO links will be equal to number of pdevs in the
709  * system. In case of simulation all the pdevs are assumed
710  * to have MLO capability.
711  *
712  * Return: On success returns number of MLO HW links. On failure
713  * returns WLAN_MLO_INVALID_NUM_LINKS.
714  */
715 static int8_t
mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context * sim_context)716 mgmt_rx_reo_sim_get_num_mlo_links(struct mgmt_rx_reo_sim_context *sim_context)
717 {
718 	uint8_t num_mlo_links;
719 
720 	if (!sim_context) {
721 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
722 		return WLAN_MLO_INVALID_NUM_LINKS;
723 	}
724 
725 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
726 
727 	num_mlo_links = sim_context->link_id_to_pdev_map.num_mlo_links;
728 
729 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
730 
731 	return num_mlo_links;
732 }
733 
734 /**
735  * mgmt_rx_reo_get_num_mlo_links() - Get number of MLO links from the reo
736  * context object
737  * @reo_context: Pointer to reo context object
738  * @grp_id: MLO Group id which it belongs to
739  *
740  * Return: On success returns number of MLO HW links. On failure
741  * returns WLAN_MLO_INVALID_NUM_LINKS.
742  */
743 static int8_t
mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context * reo_context,uint8_t grp_id)744 mgmt_rx_reo_get_num_mlo_links(struct mgmt_rx_reo_context *reo_context,
745 			      uint8_t grp_id)
746 {
747 	if (!reo_context) {
748 		mgmt_rx_reo_err("Mgmt reo context is null");
749 		return WLAN_MLO_INVALID_NUM_LINKS;
750 	}
751 
752 	return mgmt_rx_reo_sim_get_num_mlo_links(&reo_context->sim_context);
753 }
754 
755 /**
756  * mgmt_rx_reo_sim_get_context() - Helper API to get the management
757  * rx reorder simulation context
758  * @ml_grp_id: MLO group id for the rx reordering
759  *
760  * Return: On success returns the pointer to management rx reorder
761  * simulation context. On failure returns NULL.
762  */
763 static struct mgmt_rx_reo_sim_context *
mgmt_rx_reo_sim_get_context(uint8_t ml_grp_id)764 mgmt_rx_reo_sim_get_context(uint8_t ml_grp_id)
765 {
766 	struct mgmt_rx_reo_context *reo_context;
767 
768 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
769 	if (!reo_context) {
770 		mgmt_rx_reo_err("Mgmt reo context is null");
771 		return NULL;
772 	}
773 
774 	return &reo_context->sim_context;
775 }
776 
777 int8_t
mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev * pdev)778 mgmt_rx_reo_sim_get_mlo_link_id_from_pdev(struct wlan_objmgr_pdev *pdev)
779 {
780 	struct mgmt_rx_reo_sim_context *sim_context;
781 	int8_t link_id;
782 
783 	sim_context = mgmt_rx_reo_sim_get_context();
784 	if (!sim_context) {
785 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
786 		return MGMT_RX_REO_INVALID_LINK;
787 	}
788 
789 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
790 
791 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
792 		if (sim_context->link_id_to_pdev_map.map[link_id] == pdev)
793 			break;
794 
795 	/* pdev is not found in map */
796 	if (link_id == MAX_MLO_LINKS)
797 		link_id = MGMT_RX_REO_INVALID_LINK;
798 
799 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
800 
801 	return link_id;
802 }
803 
804 struct wlan_objmgr_pdev *
mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,wlan_objmgr_ref_dbgid refdbgid)805 mgmt_rx_reo_sim_get_pdev_from_mlo_link_id(uint8_t mlo_link_id,
806 					  wlan_objmgr_ref_dbgid refdbgid)
807 {
808 	struct mgmt_rx_reo_sim_context *sim_context;
809 	struct wlan_objmgr_pdev *pdev;
810 	QDF_STATUS status;
811 
812 	sim_context = mgmt_rx_reo_sim_get_context();
813 	if (!sim_context) {
814 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
815 		return NULL;
816 	}
817 
818 	if (mlo_link_id >= MAX_MLO_LINKS) {
819 		mgmt_rx_reo_err("Invalid link id %u", mlo_link_id);
820 		return NULL;
821 	}
822 
823 	qdf_spin_lock(&sim_context->link_id_to_pdev_map.lock);
824 
825 	pdev = sim_context->link_id_to_pdev_map.map[mlo_link_id];
826 	status = wlan_objmgr_pdev_try_get_ref(pdev, refdbgid);
827 	if (QDF_IS_STATUS_ERROR(status)) {
828 		mgmt_rx_reo_err("Failed to get pdev reference");
829 		return NULL;
830 	}
831 
832 	qdf_spin_unlock(&sim_context->link_id_to_pdev_map.lock);
833 
834 	return pdev;
835 }
836 
/**
 * mgmt_rx_reo_handle_potential_premature_delivery - Helper API to handle
 * premature delivery.
 * @reo_context: Pointer to reorder context
 * @global_timestamp: Global time stamp of the current management frame
 *
 * Sometimes we have to deliver a management frame to the upper layers even
 * before its wait count reaching zero. This is called premature delivery.
 * Premature delivery could happen due to time out or reorder list overflow.
 * All pending-list entries older than @global_timestamp are moved to the
 * simulation stale frame list under the master frame list lock.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_handle_potential_premature_delivery(
				struct mgmt_rx_reo_context *reo_context,
				uint32_t global_timestamp)
{
	qdf_list_t stale_frame_list_temp;
	QDF_STATUS status;
	struct mgmt_rx_reo_pending_frame_list_entry *latest_stale_frame = NULL;
	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;

	if (!reo_context)
		return QDF_STATUS_E_NULL_VALUE;

	sim_context = &reo_context->sim_context;
	master_frame_list = &sim_context->master_frame_list;

	qdf_spin_lock(&master_frame_list->lock);

	/*
	 * Walk the pending list up to the entry matching
	 * @global_timestamp; everything before it is now stale.
	 */
	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
		if (cur_entry->params.global_timestamp == global_timestamp)
			break;

		latest_stale_frame = cur_entry;
	}

	if (latest_stale_frame) {
		qdf_list_create(&stale_frame_list_temp,
				MGMT_RX_REO_SIM_STALE_FRAME_TEMP_LIST_MAX_SIZE);

		/* Detach the stale leading portion of the pending list into
		 * the temporary list, then append it to the stale list.
		 */
		status = qdf_list_split(&stale_frame_list_temp,
					&master_frame_list->pending_list,
					&latest_stale_frame->node);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;

		status = qdf_list_join(&master_frame_list->stale_list,
				       &stale_frame_list_temp);
		if (QDF_IS_STATUS_ERROR(status))
			goto exit_unlock_master_frame_list;
	}

	status = QDF_STATUS_SUCCESS;

exit_unlock_master_frame_list:
	qdf_spin_unlock(&master_frame_list->lock);

	return status;
}
899 
900 /**
901  * mgmt_rx_reo_sim_remove_frame_from_stale_list() - Removes frame from the
902  * stale management frame list
903  * @master_frame_list: pointer to master management frame list
904  * @reo_params: pointer to reo params
905  *
906  * This API removes frames from the stale management frame list.
907  *
908  * Return: QDF_STATUS of operation
909  */
910 static QDF_STATUS
mgmt_rx_reo_sim_remove_frame_from_stale_list(struct mgmt_rx_reo_master_frame_list * master_frame_list,const struct mgmt_rx_reo_params * reo_params)911 mgmt_rx_reo_sim_remove_frame_from_stale_list(
912 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
913 		const struct mgmt_rx_reo_params *reo_params)
914 {
915 	struct mgmt_rx_reo_stale_frame_list_entry *cur_entry;
916 	struct mgmt_rx_reo_stale_frame_list_entry *matching_entry = NULL;
917 	QDF_STATUS status;
918 
919 	if (!master_frame_list || !reo_params)
920 		return QDF_STATUS_E_NULL_VALUE;
921 
922 	qdf_spin_lock(&master_frame_list->lock);
923 
924 	/**
925 	 * Stale frames can come in any order at host. Do a linear search and
926 	 * remove the matching entry.
927 	 */
928 	qdf_list_for_each(&master_frame_list->stale_list, cur_entry, node) {
929 		if (cur_entry->params.link_id == reo_params->link_id &&
930 		    cur_entry->params.mgmt_pkt_ctr == reo_params->mgmt_pkt_ctr &&
931 		    cur_entry->params.global_timestamp ==
932 		    reo_params->global_timestamp) {
933 			matching_entry = cur_entry;
934 			break;
935 		}
936 	}
937 
938 	if (!matching_entry) {
939 		qdf_spin_unlock(&master_frame_list->lock);
940 		mgmt_rx_reo_err("reo sim failure: absent in stale frame list");
941 		qdf_assert_always(0);
942 	}
943 
944 	status = qdf_list_remove_node(&master_frame_list->stale_list,
945 				      &matching_entry->node);
946 
947 	if (QDF_IS_STATUS_ERROR(status)) {
948 		qdf_spin_unlock(&master_frame_list->lock);
949 		return status;
950 	}
951 
952 	qdf_mem_free(matching_entry);
953 
954 	qdf_spin_unlock(&master_frame_list->lock);
955 
956 	return QDF_STATUS_SUCCESS;
957 }
958 
959 /**
960  * mgmt_rx_reo_handle_stale_frame() - API to handle stale management frames.
961  * @reo_list: Pointer to reorder list
962  * @desc: Pointer to frame descriptor
963  *
964  * Return: QDF_STATUS of operation
965  */
966 static QDF_STATUS
mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list * reo_list,struct mgmt_rx_reo_frame_descriptor * desc)967 mgmt_rx_reo_handle_stale_frame(struct mgmt_rx_reo_list *reo_list,
968 			       struct mgmt_rx_reo_frame_descriptor *desc)
969 {
970 	QDF_STATUS status;
971 	struct mgmt_rx_reo_context *reo_context;
972 	struct mgmt_rx_reo_sim_context *sim_context;
973 	struct mgmt_rx_reo_params *reo_params;
974 
975 	if (!reo_list || !desc)
976 		return QDF_STATUS_E_NULL_VALUE;
977 
978 	/* FW consumed/Error frames are already removed */
979 	if (desc->type != MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME)
980 		return QDF_STATUS_SUCCESS;
981 
982 	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
983 	if (!reo_context)
984 		return QDF_STATUS_E_NULL_VALUE;
985 
986 	sim_context = &reo_context->sim_context;
987 
988 	reo_params = desc->rx_params->reo_params;
989 	if (!reo_params)
990 		return QDF_STATUS_E_NULL_VALUE;
991 
992 	status = mgmt_rx_reo_sim_remove_frame_from_stale_list(
993 				&sim_context->master_frame_list, reo_params);
994 
995 	return status;
996 }
997 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
998 
999 /**
1000  * mgmt_rx_reo_is_potential_premature_delivery() - Helper API to check
1001  * whether the current frame getting delivered to upper layer is a premature
1002  * delivery
1003  * @release_reason: release reason
1004  *
1005  * Return: true for a premature delivery
1006  */
1007 static bool
mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)1008 mgmt_rx_reo_is_potential_premature_delivery(uint8_t release_reason)
1009 {
1010 	return !(release_reason & RELEASE_REASON_ZERO_WAIT_COUNT);
1011 }
1012 
1013 /**
1014  * wlan_mgmt_rx_reo_get_priv_object() - Get the pdev private object of
1015  * MGMT Rx REO module
1016  * @pdev: pointer to pdev object
1017  *
1018  * Return: Pointer to pdev private object of MGMT Rx REO module on success,
1019  * else NULL
1020  */
1021 static struct mgmt_rx_reo_pdev_info *
wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev * pdev)1022 wlan_mgmt_rx_reo_get_priv_object(struct wlan_objmgr_pdev *pdev)
1023 {
1024 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx;
1025 
1026 	if (!pdev) {
1027 		mgmt_rx_reo_err("pdev is null");
1028 		return NULL;
1029 	}
1030 
1031 	mgmt_txrx_pdev_ctx = (struct mgmt_txrx_priv_pdev_context *)
1032 		wlan_objmgr_pdev_get_comp_private_obj(pdev,
1033 						      WLAN_UMAC_COMP_MGMT_TXRX);
1034 
1035 	if (!mgmt_txrx_pdev_ctx) {
1036 		mgmt_rx_reo_err("mgmt txrx context is NULL");
1037 		return NULL;
1038 	}
1039 
1040 	return mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx;
1041 }
1042 
/**
 * mgmt_rx_reo_print_snapshots() - Print all snapshots related
 * to management Rx reorder module
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Logs, at debug level, the validity flag, management packet counter and
 * global timestamp of all four snapshots. Used as a debug aid when a
 * snapshot is found to be stale or fails sanity checks.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_print_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	mgmt_rx_reo_debug("HW SS: valid = %u, ctr = %u, ts = %u",
			  mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			  mac_hw_ss->global_timestamp);
	mgmt_rx_reo_debug("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			  fw_forwarded_ss->valid,
			  fw_forwarded_ss->mgmt_pkt_ctr,
			  fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_debug("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			  fw_consumed_ss->valid,
			  fw_consumed_ss->mgmt_pkt_ctr,
			  fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_debug("HOST SS: valid = %u, ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	return QDF_STATUS_SUCCESS;
}
1077 
/**
 * mgmt_rx_reo_invalidate_stale_snapshots() - Invalidate stale management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 * @link: link ID
 *
 * Clears the valid flag (in place) of any snapshot which is inconsistent
 * with the MAC HW snapshot — the code treats MAC HW as the most up to date
 * reference, so no other snapshot may be ahead of it in either global
 * timestamp or packet counter — or whose timestamp and packet-counter
 * comparisons against the host snapshot disagree with each other.
 * Invalidated snapshots are then ignored by the wait count computation.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_invalidate_stale_snapshots
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss,
			 uint8_t link)
{
	/* Without a valid MAC HW snapshot there is no reference to judge
	 * the other snapshots against; leave them untouched.
	 */
	if (!mac_hw_ss->valid)
		return QDF_STATUS_SUCCESS;

	/* Host snapshot ahead of MAC HW (in timestamp or counter) is stale */
	if (host_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate host snapshot, link %u",
					  link);
			host_ss->valid = false;
		}
	}

	if (fw_forwarded_ss->valid) {
		/* FW forwarded snapshot ahead of MAC HW is stale */
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
					  link);
			fw_forwarded_ss->valid = false;
		}

		/* Timestamp and packet-counter comparisons against the host
		 * snapshot must agree; a mismatch means the FW forwarded
		 * snapshot is inconsistent. Note: host_ss->valid is
		 * re-checked since it may have been cleared above.
		 */
		if (host_ss->valid && fw_forwarded_ss->valid &&
		    (mgmt_rx_reo_compare_global_timestamps_gte
					(host_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp) !=
		     mgmt_rx_reo_compare_pkt_ctrs_gte
					(host_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr))) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW forwarded SS, link %u",
					  link);
			fw_forwarded_ss->valid = false;
		}
	}

	if (fw_consumed_ss->valid) {
		/* FW consumed snapshot ahead of MAC HW is stale */
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp) ||
		    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
					  link);
			fw_consumed_ss->valid = false;
		}

		/* Same agreement check as above, for the FW consumed SS */
		if (host_ss->valid && fw_consumed_ss->valid &&
		    (mgmt_rx_reo_compare_global_timestamps_gte
					(host_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp) !=
		     mgmt_rx_reo_compare_pkt_ctrs_gte
					(host_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr))) {
			mgmt_rx_reo_print_snapshots(mac_hw_ss, fw_forwarded_ss,
						    fw_consumed_ss, host_ss);
			mgmt_rx_reo_debug("Invalidate FW consumed SS, link %u",
					  link);
			fw_consumed_ss->valid = false;
		}
	}

	return QDF_STATUS_SUCCESS;
}
1175 
/**
 * mgmt_rx_reo_snapshots_check_sanity() - Check the sanity of management
 * Rx REO snapshots
 * @mac_hw_ss: MAC HW snapshot
 * @fw_forwarded_ss: FW forwarded snapshot
 * @fw_consumed_ss: FW consumed snapshot
 * @host_ss: Host snapshot
 *
 * Verifies the expected ordering between the four snapshots: MAC HW must
 * be at or ahead of every other snapshot, and whichever of the FW
 * forwarded/consumed snapshots are valid must be at or ahead of the host
 * snapshot, in both global timestamp and packet counter. A valid "later"
 * snapshot with an invalid "earlier" one (e.g. valid host SS with both FW
 * snapshots invalid) is also rejected. On any failure all four snapshots
 * are dumped at warn level.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_snapshots_check_sanity
			(struct mgmt_rx_reo_snapshot_params *mac_hw_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss,
			 struct mgmt_rx_reo_snapshot_params *fw_consumed_ss,
			 struct mgmt_rx_reo_snapshot_params *host_ss)
{
	QDF_STATUS status;

	/* If MAC HW SS is invalid, no downstream snapshot may be valid */
	if (!mac_hw_ss->valid) {
		if (fw_forwarded_ss->valid || fw_consumed_ss->valid ||
		    host_ss->valid) {
			mgmt_rx_reo_warn_rl("MAC HW SS is invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* Host SS cannot be valid when both FW snapshots are invalid */
	if (!fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
		if (host_ss->valid) {
			mgmt_rx_reo_warn_rl("FW fwd and consumed SS invalid");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		return QDF_STATUS_SUCCESS;
	}

	/* MAC HW must be at or ahead of the FW forwarded snapshot */
	if (fw_forwarded_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_forwarded_ss->global_timestamp)) {
			mgmt_rx_reo_warn_rl("TS: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_forwarded_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_warn_rl("CTR: MAC HW SS < FW forwarded SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	/* MAC HW must be at or ahead of the FW consumed snapshot */
	if (fw_consumed_ss->valid) {
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 fw_consumed_ss->global_timestamp)) {
			mgmt_rx_reo_warn_rl("TS: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 fw_consumed_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_warn_rl("CTR: MAC HW SS < FW consumed SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}
	}

	if (host_ss->valid) {
		/* MAC HW must be at or ahead of the host snapshot */
		if (!mgmt_rx_reo_compare_global_timestamps_gte
					(mac_hw_ss->global_timestamp,
					 host_ss->global_timestamp)) {
			mgmt_rx_reo_warn_rl("TS: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(mac_hw_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
			mgmt_rx_reo_warn_rl("PKT CTR: MAC HW SS < host SS");
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		/* Only FW forwarded SS valid: it must be at or ahead of
		 * the host snapshot.
		 */
		if (fw_forwarded_ss->valid && !fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_warn_rl("TS: FW fwd < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_warn_rl("CTR: FW fwd < host SS");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		/* Only FW consumed SS valid: it must be at or ahead of
		 * the host snapshot.
		 */
		if (fw_consumed_ss->valid && !fw_forwarded_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_warn_rl("TS: FW consumed < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_warn_rl("CTR: FW consumed < host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}

		/* Both FW snapshots valid: at least one of them must be at
		 * or ahead of the host snapshot.
		 */
		if (fw_forwarded_ss->valid && fw_consumed_ss->valid) {
			if (!mgmt_rx_reo_compare_global_timestamps_gte
					(fw_consumed_ss->global_timestamp,
					 host_ss->global_timestamp) &&
			    !mgmt_rx_reo_compare_global_timestamps_gte
					(fw_forwarded_ss->global_timestamp,
					 host_ss->global_timestamp)) {
				mgmt_rx_reo_warn_rl("TS: FW consumed/fwd<host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}

			if (!mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_consumed_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr) &&
			    !mgmt_rx_reo_compare_pkt_ctrs_gte
					(fw_forwarded_ss->mgmt_pkt_ctr,
					 host_ss->mgmt_pkt_ctr)) {
				mgmt_rx_reo_warn_rl("CTR:FW consumed/fwd<host");
				status = QDF_STATUS_E_INVAL;
				goto fail;
			}
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	mgmt_rx_reo_warn_rl("HW SS: valid = %u, ctr = %u, ts = %u",
			    mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
			    mac_hw_ss->global_timestamp);
	mgmt_rx_reo_warn_rl("FW forwarded SS: valid = %u, ctr = %u, ts = %u",
			    fw_forwarded_ss->valid,
			    fw_forwarded_ss->mgmt_pkt_ctr,
			    fw_forwarded_ss->global_timestamp);
	mgmt_rx_reo_warn_rl("FW consumed SS: valid = %u, ctr = %u, ts = %u",
			    fw_consumed_ss->valid,
			    fw_consumed_ss->mgmt_pkt_ctr,
			    fw_consumed_ss->global_timestamp);
	mgmt_rx_reo_warn_rl("HOST SS: valid = %u, ctr = %u, ts = %u",
			    host_ss->valid, host_ss->mgmt_pkt_ctr,
			    host_ss->global_timestamp);

	return status;
}
1350 
1351 /**
1352  * wlan_mgmt_rx_reo_algo_calculate_wait_count() - Calculates the number of
1353  * frames an incoming frame should wait for before it gets delivered.
1354  * @in_frame_pdev: pdev on which this frame is received
1355  * @desc: frame Descriptor
1356  *
1357  * Each frame carrys a MGMT pkt number which is local to that link, and a
1358  * timestamp which is global across all the links. MAC HW and FW also captures
1359  * the same details of the last frame that they have seen. Host also maintains
1360  * the details of the last frame it has seen. In total, there are 4 snapshots.
1361  * 1. MAC HW snapshot - latest frame seen at MAC HW
1362  * 2. FW forwarded snapshot- latest frame forwarded to the Host
1363  * 3. FW consumed snapshot - latest frame consumed by the FW
1364  * 4. Host/FW consumed snapshot - latest frame seen by the Host
1365  * By using all these snapshots, this function tries to compute the wait count
1366  * for a given incoming frame on all links.
1367  *
1368  * Return: QDF_STATUS of operation
1369  */
1370 static QDF_STATUS
wlan_mgmt_rx_reo_algo_calculate_wait_count(struct wlan_objmgr_pdev * in_frame_pdev,struct mgmt_rx_reo_frame_descriptor * desc)1371 wlan_mgmt_rx_reo_algo_calculate_wait_count(
1372 		struct wlan_objmgr_pdev *in_frame_pdev,
1373 		struct mgmt_rx_reo_frame_descriptor *desc)
1374 {
1375 	QDF_STATUS status;
1376 	uint8_t link;
1377 	int8_t grp_id;
1378 	int8_t in_frame_link;
1379 	int frames_pending, delta_fwd_host;
1380 	uint8_t snapshot_id;
1381 	struct wlan_objmgr_pdev *pdev;
1382 	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
1383 	struct mgmt_rx_reo_pdev_info *in_frame_rx_reo_pdev_ctx;
1384 	struct mgmt_rx_reo_snapshot_info *snapshot_info;
1385 	struct mgmt_rx_reo_snapshot_params snapshot_params
1386 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
1387 	struct mgmt_rx_reo_snapshot_params *mac_hw_ss, *fw_forwarded_ss,
1388 					    *fw_consumed_ss, *host_ss;
1389 	struct mgmt_rx_reo_params *in_frame_params;
1390 	struct mgmt_rx_reo_wait_count *wait_count;
1391 
1392 	if (!in_frame_pdev) {
1393 		mgmt_rx_reo_err("pdev is null");
1394 		return QDF_STATUS_E_NULL_VALUE;
1395 	}
1396 
1397 	if (!desc) {
1398 		mgmt_rx_reo_err("Frame descriptor is null");
1399 		return QDF_STATUS_E_NULL_VALUE;
1400 	}
1401 
1402 	if (!desc->rx_params) {
1403 		mgmt_rx_reo_err("MGMT Rx params of incoming frame is NULL");
1404 		return QDF_STATUS_E_NULL_VALUE;
1405 	}
1406 
1407 	in_frame_params = desc->rx_params->reo_params;
1408 	if (!in_frame_params) {
1409 		mgmt_rx_reo_err("MGMT Rx REO params of incoming frame is NULL");
1410 		return QDF_STATUS_E_NULL_VALUE;
1411 	}
1412 
1413 	wait_count = &desc->wait_count;
1414 
1415 	/* Get the MLO link ID of incoming frame */
1416 	in_frame_link = wlan_get_mlo_link_id_from_pdev(in_frame_pdev);
1417 	grp_id = wlan_get_mlo_grp_id_from_pdev(in_frame_pdev);
1418 	if (in_frame_link < 0 || in_frame_link >= MAX_MLO_LINKS) {
1419 		mgmt_rx_reo_err("Invalid frame link = %d", in_frame_link);
1420 		return QDF_STATUS_E_INVAL;
1421 	}
1422 
1423 	if (!mgmt_rx_reo_is_valid_link(in_frame_link, grp_id)) {
1424 		mgmt_rx_reo_err("Invalid link = %d and group = %d",
1425 				in_frame_link, grp_id);
1426 		return QDF_STATUS_E_INVAL;
1427 	}
1428 
1429 	in_frame_rx_reo_pdev_ctx =
1430 			wlan_mgmt_rx_reo_get_priv_object(in_frame_pdev);
1431 	if (!in_frame_rx_reo_pdev_ctx) {
1432 		mgmt_rx_reo_err("Reo context null for incoming frame pdev");
1433 		return QDF_STATUS_E_FAILURE;
1434 	}
1435 	qdf_mem_zero(in_frame_rx_reo_pdev_ctx->raw_snapshots,
1436 		     sizeof(in_frame_rx_reo_pdev_ctx->raw_snapshots));
1437 
1438 	/* Iterate over all the valid MLO links */
1439 	for (link = 0; link < MAX_MLO_LINKS; link++) {
1440 		/* No need wait for any frames on an invalid link */
1441 		if (!mgmt_rx_reo_is_valid_link(link, grp_id)) {
1442 			frames_pending = 0;
1443 			goto update_pending_frames;
1444 		}
1445 
1446 		pdev = wlan_get_pdev_from_mlo_link_id(link, grp_id,
1447 						      WLAN_MGMT_RX_REO_ID);
1448 
1449 		/* No need to wait for any frames if the pdev is not found */
1450 		if (!pdev) {
1451 			mgmt_rx_reo_debug("pdev is null for link %d", link);
1452 			frames_pending = 0;
1453 			goto update_pending_frames;
1454 		}
1455 
1456 		rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
1457 		if (!rx_reo_pdev_ctx) {
1458 			mgmt_rx_reo_err("Mgmt reo context empty for pdev %pK",
1459 					pdev);
1460 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1461 			return QDF_STATUS_E_FAILURE;
1462 		}
1463 
1464 		if (!rx_reo_pdev_ctx->init_complete) {
1465 			mgmt_rx_reo_debug("REO init in progress for link %d",
1466 					  link);
1467 			wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1468 			frames_pending = 0;
1469 			goto update_pending_frames;
1470 		}
1471 
1472 		host_ss = &rx_reo_pdev_ctx->host_snapshot;
1473 		desc->host_snapshot[link] = rx_reo_pdev_ctx->host_snapshot;
1474 
1475 		mgmt_rx_reo_info("link_id = %u HOST SS: valid = %u, ctr = %u, ts = %u",
1476 				 link, host_ss->valid, host_ss->mgmt_pkt_ctr,
1477 				 host_ss->global_timestamp);
1478 
1479 		snapshot_id = 0;
1480 		/* Read all the shared snapshots */
1481 		while (snapshot_id <
1482 			MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
1483 			snapshot_info = &rx_reo_pdev_ctx->
1484 				host_target_shared_snapshot_info[snapshot_id];
1485 
1486 			qdf_mem_zero(&snapshot_params[snapshot_id],
1487 				     sizeof(snapshot_params[snapshot_id]));
1488 
1489 			status = tgt_mgmt_rx_reo_read_snapshot(
1490 					pdev, snapshot_info, snapshot_id,
1491 					&snapshot_params[snapshot_id],
1492 					in_frame_rx_reo_pdev_ctx->raw_snapshots
1493 					[link][snapshot_id]);
1494 
1495 			/* Read operation shouldn't fail */
1496 			if (QDF_IS_STATUS_ERROR(status)) {
1497 				mgmt_rx_reo_err("snapshot(%d) read failed on"
1498 						"link (%d)", snapshot_id, link);
1499 				wlan_objmgr_pdev_release_ref(
1500 						pdev, WLAN_MGMT_RX_REO_ID);
1501 				return status;
1502 			}
1503 
1504 			/* If snapshot is valid, save it in the pdev context */
1505 			if (snapshot_params[snapshot_id].valid) {
1506 				rx_reo_pdev_ctx->
1507 				   last_valid_shared_snapshot[snapshot_id] =
1508 				   snapshot_params[snapshot_id];
1509 			}
1510 			desc->shared_snapshots[link][snapshot_id] =
1511 						snapshot_params[snapshot_id];
1512 
1513 			snapshot_id++;
1514 		}
1515 
1516 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
1517 
1518 		mac_hw_ss = &snapshot_params
1519 				[MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
1520 		fw_forwarded_ss = &snapshot_params
1521 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
1522 		fw_consumed_ss = &snapshot_params
1523 				[MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
1524 
1525 		status = mgmt_rx_reo_invalidate_stale_snapshots(mac_hw_ss,
1526 								fw_forwarded_ss,
1527 								fw_consumed_ss,
1528 								host_ss, link);
1529 		if (QDF_IS_STATUS_ERROR(status)) {
1530 			mgmt_rx_reo_err("Failed to invalidate SS for link %u",
1531 					link);
1532 			return status;
1533 		}
1534 
1535 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW] =
1536 								*mac_hw_ss;
1537 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED] =
1538 								*fw_forwarded_ss;
1539 		desc->shared_snapshots[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED] =
1540 								*fw_consumed_ss;
1541 		desc->host_snapshot[link] = *host_ss;
1542 
1543 		status = mgmt_rx_reo_snapshots_check_sanity
1544 			(mac_hw_ss, fw_forwarded_ss, fw_consumed_ss, host_ss);
1545 		if (QDF_IS_STATUS_ERROR(status)) {
1546 			QDF_STATUS ret;
1547 
1548 			ret = handle_snapshot_sanity_failures(desc, link);
1549 			if (QDF_IS_STATUS_ERROR(ret)) {
1550 				mgmt_rx_reo_err_rl("Err:SS sanity fail handle");
1551 				return ret;
1552 			}
1553 			mgmt_rx_reo_warn_rl("Drop frame due to SS sanity fail");
1554 
1555 			return status;
1556 		}
1557 
1558 		mgmt_rx_reo_info("link_id = %u HW SS: valid = %u, ctr = %u, ts = %u",
1559 				 link, mac_hw_ss->valid,
1560 				 mac_hw_ss->mgmt_pkt_ctr,
1561 				 mac_hw_ss->global_timestamp);
1562 		mgmt_rx_reo_info("link_id = %u FW forwarded SS: valid = %u, ctr = %u, ts = %u",
1563 				 link, fw_forwarded_ss->valid,
1564 				 fw_forwarded_ss->mgmt_pkt_ctr,
1565 				 fw_forwarded_ss->global_timestamp);
1566 		mgmt_rx_reo_info("link_id = %u FW consumed SS: valid = %u, ctr = %u, ts = %u",
1567 				 link, fw_consumed_ss->valid,
1568 				 fw_consumed_ss->mgmt_pkt_ctr,
1569 				 fw_consumed_ss->global_timestamp);
1570 
1571 		/* No need wait for any frames on the same link */
1572 		if (link == in_frame_link) {
1573 			frames_pending = 0;
1574 			goto update_pending_frames;
1575 		}
1576 
1577 		/**
1578 		 * If MAC HW snapshot is invalid, the link has not started
1579 		 * receiving management frames. Set wait count to zero.
1580 		 */
1581 		if (!mac_hw_ss->valid) {
1582 			frames_pending = 0;
1583 			goto update_pending_frames;
1584 		}
1585 
1586 		/**
1587 		 * If host snapshot is invalid, wait for MAX number of frames.
1588 		 * When any frame in this link arrives at host, actual wait
1589 		 * counts will be updated.
1590 		 */
1591 		if (!host_ss->valid) {
1592 			wait_count->per_link_count[link] = UINT_MAX;
1593 			wait_count->total_count += UINT_MAX;
1594 			goto print_wait_count;
1595 		}
1596 
1597 		/**
1598 		 * If MAC HW snapshot sequence number and host snapshot
1599 		 * sequence number are same, all the frames received by
1600 		 * this link are processed by host. No need to wait for
1601 		 * any frames from this link.
1602 		 */
1603 		if (!mgmt_rx_reo_subtract_pkt_ctrs(mac_hw_ss->mgmt_pkt_ctr,
1604 						   host_ss->mgmt_pkt_ctr)) {
1605 			frames_pending = 0;
1606 			goto update_pending_frames;
1607 		}
1608 
1609 		/**
1610 		 * Ideally, the incoming frame has to wait for only those frames
1611 		 * (on other links) which meet all the below criterion.
1612 		 * 1. Frame's timestamp is less than incoming frame's
1613 		 * 2. Frame is supposed to be consumed by the Host
1614 		 * 3. Frame is not yet seen by the Host.
1615 		 * We may not be able to compute the exact optimal wait count
1616 		 * because HW/FW provides a limited assist.
1617 		 * This algorithm tries to get the best estimate of wait count
1618 		 * by not waiting for those frames where we have a conclusive
1619 		 * evidence that we don't have to wait for those frames.
1620 		 */
1621 
1622 		/**
1623 		 * If this link has already seen a frame whose timestamp is
1624 		 * greater than or equal to incoming frame's timestamp,
1625 		 * then no need to wait for any frames on this link.
1626 		 * If the total wait count becomes zero, then the policy on
1627 		 * whether to deliver such a frame to upper layers is handled
1628 		 * separately.
1629 		 */
1630 		if (mgmt_rx_reo_compare_global_timestamps_gte(
1631 				host_ss->global_timestamp,
1632 				in_frame_params->global_timestamp)) {
1633 			frames_pending = 0;
1634 			goto update_pending_frames;
1635 		}
1636 
1637 		/**
1638 		 * For starters, we only have to wait for the frames that are
1639 		 * seen by MAC HW but not yet seen by Host. The frames which
1640 		 * reach MAC HW later are guaranteed to have a timestamp
1641 		 * greater than incoming frame's timestamp.
1642 		 */
1643 		frames_pending = mgmt_rx_reo_subtract_pkt_ctrs(
1644 					mac_hw_ss->mgmt_pkt_ctr,
1645 					host_ss->mgmt_pkt_ctr);
1646 		qdf_assert_always(frames_pending >= 0);
1647 
1648 		if (frames_pending &&
1649 		    mgmt_rx_reo_compare_global_timestamps_gte
1650 					(mac_hw_ss->global_timestamp,
1651 					 in_frame_params->global_timestamp)) {
1652 			/**
1653 			 * Last frame seen at MAC HW has timestamp greater than
1654 			 * or equal to incoming frame's timestamp. So no need to
1655 			 * wait for that last frame, but we can't conclusively
1656 			 * say anything about timestamp of frames before the
1657 			 * last frame, so try to wait for all of those frames.
1658 			 */
1659 			frames_pending--;
1660 			qdf_assert_always(frames_pending >= 0);
1661 
1662 			if (fw_consumed_ss->valid &&
1663 			    mgmt_rx_reo_compare_global_timestamps_gte(
1664 				fw_consumed_ss->global_timestamp,
1665 				in_frame_params->global_timestamp)) {
1666 				/**
1667 				 * Last frame consumed by the FW has timestamp
1668 				 * greater than or equal to incoming frame's.
1669 				 * That means all the frames from
1670 				 * fw_consumed_ss->mgmt_pkt_ctr to
1671 				 * mac_hw->mgmt_pkt_ctr will have timestamp
1672 				 * greater than or equal to incoming frame's and
1673 				 * hence, no need to wait for those frames.
1674 				 * We just need to wait for frames from
1675 				 * host_ss->mgmt_pkt_ctr to
1676 				 * fw_consumed_ss->mgmt_pkt_ctr-1. This is a
1677 				 * better estimate over the above estimate,
1678 				 * so update frames_pending.
1679 				 */
1680 				frames_pending =
1681 				  mgmt_rx_reo_subtract_pkt_ctrs(
1682 				      fw_consumed_ss->mgmt_pkt_ctr,
1683 				      host_ss->mgmt_pkt_ctr) - 1;
1684 
1685 				qdf_assert_always(frames_pending >= 0);
1686 
1687 				/**
1688 				 * Last frame forwarded to Host has timestamp
1689 				 * less than incoming frame's. That means all
1690 				 * the frames starting from
1691 				 * fw_forwarded_ss->mgmt_pkt_ctr+1 to
1692 				 * fw_consumed_ss->mgmt_pkt_ctr are consumed by
1693 				 * the FW and hence, no need to wait for those
1694 				 * frames. We just need to wait for frames
1695 				 * from host_ss->mgmt_pkt_ctr to
1696 				 * fw_forwarded_ss->mgmt_pkt_ctr. This is a
1697 				 * better estimate over the above estimate,
1698 				 * so update frames_pending.
1699 				 */
1700 				if (fw_forwarded_ss->valid &&
1701 				    !mgmt_rx_reo_compare_global_timestamps_gte(
1702 					fw_forwarded_ss->global_timestamp,
1703 					in_frame_params->global_timestamp)) {
1704 					frames_pending =
1705 					  mgmt_rx_reo_subtract_pkt_ctrs(
1706 					      fw_forwarded_ss->mgmt_pkt_ctr,
1707 					      host_ss->mgmt_pkt_ctr);
1708 
1709 					/**
1710 					 * frames_pending can be negative in
1711 					 * cases whene there are no frames
1712 					 * getting forwarded to the Host. No
1713 					 * need to wait for any frames in that
1714 					 * case.
1715 					 */
1716 					if (frames_pending < 0)
1717 						frames_pending = 0;
1718 				}
1719 			}
1720 
1721 			/**
1722 			 * Last frame forwarded to Host has timestamp greater
1723 			 * than or equal to incoming frame's. That means all the
1724 			 * frames from fw_forwarded->mgmt_pkt_ctr to
1725 			 * mac_hw->mgmt_pkt_ctr will have timestamp greater than
1726 			 * or equal to incoming frame's and hence, no need to
1727 			 * wait for those frames. We may have to just wait for
1728 			 * frames from host_ss->mgmt_pkt_ctr to
1729 			 * fw_forwarded_ss->mgmt_pkt_ctr-1
1730 			 */
1731 			if (fw_forwarded_ss->valid &&
1732 			    mgmt_rx_reo_compare_global_timestamps_gte(
1733 				fw_forwarded_ss->global_timestamp,
1734 				in_frame_params->global_timestamp)) {
1735 				delta_fwd_host =
1736 				  mgmt_rx_reo_subtract_pkt_ctrs(
1737 				    fw_forwarded_ss->mgmt_pkt_ctr,
1738 				    host_ss->mgmt_pkt_ctr) - 1;
1739 
1740 				qdf_assert_always(delta_fwd_host >= 0);
1741 
1742 				/**
1743 				 * This will be a better estimate over the one
1744 				 * we computed using mac_hw_ss but this may or
1745 				 * may not be a better estimate over the
1746 				 * one we computed using fw_consumed_ss.
1747 				 * When timestamps of both fw_consumed_ss and
1748 				 * fw_forwarded_ss are greater than incoming
1749 				 * frame's but timestamp of fw_consumed_ss is
1750 				 * smaller than fw_forwarded_ss, then
1751 				 * frames_pending will be smaller than
1752 				 * delta_fwd_host, the reverse will be true in
1753 				 * other cases. Instead of checking for all
1754 				 * those cases, just waiting for the minimum
1755 				 * among these two should be sufficient.
1756 				 */
1757 				frames_pending = qdf_min(frames_pending,
1758 							 delta_fwd_host);
1759 				qdf_assert_always(frames_pending >= 0);
1760 			}
1761 		}
1762 
1763 update_pending_frames:
1764 			qdf_assert_always(frames_pending >= 0);
1765 
1766 			wait_count->per_link_count[link] = frames_pending;
1767 			wait_count->total_count += frames_pending;
1768 
1769 print_wait_count:
1770 			mgmt_rx_reo_info("link_id = %u wait count: per link = 0x%x, total = 0x%llx",
1771 					 link, wait_count->per_link_count[link],
1772 					 wait_count->total_count);
1773 	}
1774 
1775 	return QDF_STATUS_SUCCESS;
1776 }
1777 
/**
 * struct mgmt_rx_reo_list_entry_debug_info - This structure holds the necessary
 * information about a reo list entry for debug purposes.
 * @link_id: link id
 * @mgmt_pkt_ctr: management packet counter
 * @global_timestamp: global time stamp
 * @wait_count: wait count values
 * @status: status of the entry in the list
 * @entry: pointer to reo list entry
 *
 * Used by mgmt_rx_reo_list_display() to snapshot the reorder-list entries
 * while the list lock is held, so that the (slow) printing can happen after
 * the lock has been dropped.
 */
struct mgmt_rx_reo_list_entry_debug_info {
	uint8_t link_id;
	uint16_t mgmt_pkt_ctr;
	uint32_t global_timestamp;
	struct mgmt_rx_reo_wait_count wait_count;
	uint32_t status;
	/* Raw pointer logged for debugging only; may be stale once printed */
	struct mgmt_rx_reo_list_entry *entry;
};
1796 
1797 /**
1798  * mgmt_rx_reo_list_display() - API to print the entries in the reorder list
1799  * @reo_list: Pointer to reorder list
1800  *
1801  * Return: QDF_STATUS
1802  */
1803 static QDF_STATUS
mgmt_rx_reo_list_display(struct mgmt_rx_reo_list * reo_list)1804 mgmt_rx_reo_list_display(struct mgmt_rx_reo_list *reo_list)
1805 {
1806 	uint32_t reo_list_size;
1807 	uint32_t index;
1808 	struct mgmt_rx_reo_list_entry *cur_entry;
1809 	struct mgmt_rx_reo_list_entry_debug_info *debug_info;
1810 
1811 	if (!reo_list) {
1812 		mgmt_rx_reo_err("Pointer to reo list is null");
1813 		return QDF_STATUS_E_NULL_VALUE;
1814 	}
1815 
1816 	qdf_spin_lock_bh(&reo_list->list_lock);
1817 
1818 	reo_list_size = qdf_list_size(&reo_list->list);
1819 
1820 	if (reo_list_size == 0) {
1821 		qdf_spin_unlock_bh(&reo_list->list_lock);
1822 		mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1823 				  reo_list_size);
1824 		return QDF_STATUS_SUCCESS;
1825 	}
1826 
1827 	debug_info = qdf_mem_malloc_atomic(reo_list_size * sizeof(*debug_info));
1828 	if (!debug_info) {
1829 		qdf_spin_unlock_bh(&reo_list->list_lock);
1830 		mgmt_rx_reo_err("Memory allocation failed");
1831 		return QDF_STATUS_E_NOMEM;
1832 	}
1833 
1834 	index = 0;
1835 	qdf_list_for_each(&reo_list->list, cur_entry, node) {
1836 		debug_info[index].link_id =
1837 				mgmt_rx_reo_get_link_id(cur_entry->rx_params);
1838 		debug_info[index].mgmt_pkt_ctr =
1839 			mgmt_rx_reo_get_pkt_counter(cur_entry->rx_params);
1840 		debug_info[index].global_timestamp =
1841 				mgmt_rx_reo_get_global_ts(cur_entry->rx_params);
1842 		debug_info[index].wait_count = cur_entry->wait_count;
1843 		debug_info[index].status = cur_entry->status;
1844 		debug_info[index].entry = cur_entry;
1845 
1846 		++index;
1847 	}
1848 
1849 	qdf_spin_unlock_bh(&reo_list->list_lock);
1850 
1851 	mgmt_rx_reo_debug("Reorder list");
1852 	mgmt_rx_reo_debug("##################################################");
1853 	mgmt_rx_reo_debug("Number of entries in the reo list = %u",
1854 			  reo_list_size);
1855 	for (index = 0; index < reo_list_size; index++) {
1856 		uint8_t link_id;
1857 
1858 		mgmt_rx_reo_debug("index = %u: link_id = %u, ts = %u, ctr = %u, status = 0x%x, entry = %pK",
1859 				  index, debug_info[index].link_id,
1860 				  debug_info[index].global_timestamp,
1861 				  debug_info[index].mgmt_pkt_ctr,
1862 				  debug_info[index].status,
1863 				  debug_info[index].entry);
1864 
1865 		mgmt_rx_reo_debug("Total wait count = 0x%llx",
1866 				  debug_info[index].wait_count.total_count);
1867 
1868 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
1869 			mgmt_rx_reo_debug("Link id = %u, wait_count = 0x%x",
1870 					  link_id, debug_info[index].wait_count.
1871 					  per_link_count[link_id]);
1872 	}
1873 	mgmt_rx_reo_debug("##################################################");
1874 
1875 	qdf_mem_free(debug_info);
1876 
1877 	return QDF_STATUS_SUCCESS;
1878 }
1879 
1880 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
1881 /**
1882  * mgmt_rx_reo_egress_frame_debug_info_enabled() - API to check whether egress
1883  * frame info debug feaure is enabled
1884  * @egress_frame_debug_info: Pointer to egress frame debug info object
1885  *
1886  * Return: true or false
1887  */
1888 static bool
mgmt_rx_reo_egress_frame_debug_info_enabled(struct reo_egress_debug_info * egress_frame_debug_info)1889 mgmt_rx_reo_egress_frame_debug_info_enabled
1890 			(struct reo_egress_debug_info *egress_frame_debug_info)
1891 {
1892 	return egress_frame_debug_info->frame_list_size;
1893 }
1894 
/**
 * mgmt_rx_reo_debug_print_scheduler_stats() - API to print the stats
 * related to frames getting scheduled by mgmt rx reo scheduler
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames getting scheduled by management
 * Rx reorder scheduler. Prints three tables: scheduled counts and
 * rescheduled counts (per link x per reorder context) and the per-link
 * scheduler callback counts, each with row/column totals.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_scheduler_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	struct reo_scheduler_stats *stats;
	uint64_t scheduled_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t scheduled_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
	uint64_t total_scheduled_count = 0;
	uint64_t rescheduled_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t rescheduled_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
	uint64_t total_rescheduled_count = 0;
	uint64_t total_scheduler_cb_count = 0;
	uint8_t link_id;
	uint8_t ctx;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	stats = &reo_ctx->scheduler_debug_info.stats;

	/* Row totals: accumulate counts across contexts for each link */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++) {
			scheduled_count_per_link[link_id] +=
				stats->scheduled_count[link_id][ctx];
			rescheduled_count_per_link[link_id] +=
				stats->rescheduled_count[link_id][ctx];
		}

		total_scheduled_count += scheduled_count_per_link[link_id];
		total_rescheduled_count += rescheduled_count_per_link[link_id];
		total_scheduler_cb_count += stats->scheduler_cb_count[link_id];
	}

	/* Column totals: accumulate counts across links for each context */
	for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++) {
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
			scheduled_count_per_context[ctx] +=
				stats->scheduled_count[link_id][ctx];
			rescheduled_count_per_context[ctx] +=
				stats->rescheduled_count[link_id][ctx];
		}
	}

	/*
	 * Table 1: scheduled counts. Columns 0/1/2 are the reorder contexts
	 * named in the legend printed just above the table.
	 * NOTE(review): the table body hard-codes three context columns;
	 * revisit if MGMT_RX_REO_CONTEXT_MAX ever grows beyond 3.
	 */
	mgmt_rx_reo_alert("Scheduler stats:");
	mgmt_rx_reo_alert("\t1) Scheduled count");
	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->scheduled_count[link_id][0],
				  stats->scheduled_count[link_id][1],
				  stats->scheduled_count[link_id][2],
				  scheduled_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	/* Footer row: per-context and grand totals */
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  scheduled_count_per_context[0],
			  scheduled_count_per_context[1],
			  scheduled_count_per_context[2],
			  total_scheduled_count);

	/* Table 2: rescheduled counts, same layout as table 1 */
	mgmt_rx_reo_alert("\t2) Rescheduled count");
	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->rescheduled_count[link_id][0],
				  stats->rescheduled_count[link_id][1],
				  stats->rescheduled_count[link_id][2],
				  rescheduled_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  rescheduled_count_per_context[0],
			  rescheduled_count_per_context[1],
			  rescheduled_count_per_context[2],
			  total_rescheduled_count);

	/* Table 3: scheduler callback invocations per link */
	mgmt_rx_reo_alert("\t3) Per link stats:");
	mgmt_rx_reo_alert("\t----------------------");
	mgmt_rx_reo_alert("\t|link id|Scheduler CB|");
	mgmt_rx_reo_alert("\t|       |    Count   |");
	mgmt_rx_reo_alert("\t----------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%7u|%12llu|", link_id,
				  stats->scheduler_cb_count[link_id]);
		mgmt_rx_reo_alert("\t----------------------");
	}
	mgmt_rx_reo_alert("\t%8s|%12llu|\n\n", "", total_scheduler_cb_count);

	return QDF_STATUS_SUCCESS;
}
2008 
/**
 * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
 * related to frames going out of the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going out of the management
 * Rx reorder module: delivery attempt/success/premature/drop counts,
 * delivery-reason and delivery-context breakdowns, and misc counters.
 * Also chains into the scheduler stats printer at the end.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	struct reo_egress_frame_stats *stats;
	uint8_t link_id;
	uint8_t reason;
	uint8_t ctx;
	uint64_t total_delivery_attempts_count = 0;
	uint64_t total_delivery_success_count = 0;
	uint64_t total_drop_count = 0;
	uint64_t total_premature_delivery_count = 0;
	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t delivery_count_per_reason[RELEASE_REASON_MAX] = {0};
	uint64_t delivery_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
	uint64_t total_delivery_count = 0;
	/* '-' filled border strings for the reason table, built at runtime */
	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};
	QDF_STATUS status;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	stats = &reo_ctx->egress_frame_debug_info.stats;

	/* Grand totals across all links for the delivery table footer */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		total_delivery_attempts_count +=
				stats->delivery_attempts_count[link_id];
		total_delivery_success_count +=
				stats->delivery_success_count[link_id];
		total_drop_count += stats->drop_count[link_id];
		total_premature_delivery_count +=
				stats->premature_delivery_count[link_id];
	}

	/* Per-link delivery totals summed over all release reasons */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (reason = 0; reason < RELEASE_REASON_MAX;
		     reason++)
			delivery_count_per_link[link_id] +=
				stats->delivery_reason_count[link_id][reason];
		total_delivery_count += delivery_count_per_link[link_id];
	}
	/* Per-reason totals summed over all links */
	for (reason = 0; reason < RELEASE_REASON_MAX; reason++)
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
			delivery_count_per_reason[reason] +=
				stats->delivery_reason_count[link_id][reason];
	/* Per-context totals summed over all links */
	for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++)
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
			delivery_count_per_context[ctx] +=
				stats->delivery_context_count[link_id][ctx];

	/* Table 1: per-link delivery attempt/success/premature/drop counts */
	mgmt_rx_reo_alert("Egress frame stats:");
	mgmt_rx_reo_alert("\t1) Delivery related stats:");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	mgmt_rx_reo_alert("\t|link id  |Attempts|Success |Premature|Drop    |");
	mgmt_rx_reo_alert("\t|         | count  | count  | count   |count   |");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%9u|%8llu|%8llu|%9llu|%8llu|", link_id,
				  stats->delivery_attempts_count[link_id],
				  stats->delivery_success_count[link_id],
				  stats->premature_delivery_count[link_id],
				  stats->drop_count[link_id]);
		mgmt_rx_reo_alert("\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t%10s|%8llu|%8llu|%9llu|%8llu|\n\n", "",
			  total_delivery_attempts_count,
			  total_delivery_success_count,
			  total_premature_delivery_count,
			  total_drop_count);

	/* Table 2: delivery counts broken down by release reason */
	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
	mgmt_rx_reo_alert("\tRelease Reason Values:-");
	mgmt_rx_reo_alert("\tREASON_ZERO_WAIT_COUNT - 0x%lx",
			  RELEASE_REASON_ZERO_WAIT_COUNT);
	mgmt_rx_reo_alert("\tREASON_AGED_OUT - 0x%lx",
			  RELEASE_REASON_AGED_OUT);
	mgmt_rx_reo_alert("\tREASON_OLDER_THAN_AGED_OUT_FRAME - 0x%lx",
			  RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME);
	mgmt_rx_reo_alert("\tREASON_INGRESS_LIST_OVERFLOW - 0x%lx",
			  RELEASE_REASON_INGRESS_LIST_OVERFLOW);
	mgmt_rx_reo_alert("\tREASON_OLDER_THAN_READY_TO_DELIVER_FRAMES - 0x%lx",
			  RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES);
	mgmt_rx_reo_alert("\tREASON_EGRESS_LIST_OVERFLOW - 0x%lx",
			  RELEASE_REASON_EGRESS_LIST_OVERFLOW);

	qdf_mem_set(delivery_reason_stats_boarder_a,
		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
	qdf_mem_set(delivery_reason_stats_boarder_b,
		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE, '-');

	/*
	 * NOTE(review): the reason/context tables hard-code six link columns
	 * (indices 0..5); revisit if MAX_MLO_LINKS ever differs from 6.
	 */
	mgmt_rx_reo_alert("\t%66s", delivery_reason_stats_boarder_a);
	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "Release Reason/",
			  "", "", "", "", "", "");
	mgmt_rx_reo_alert("\t|%16s|%7s|%7s|%7s|%7s|%7s|%7s|", "link id",
			  "0", "1", "2", "3", "4", "5");
	mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);

	for (reason = 0; reason < RELEASE_REASON_MAX; reason++) {
		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
				  reason,
				  stats->delivery_reason_count[0][reason],
				  stats->delivery_reason_count[1][reason],
				  stats->delivery_reason_count[2][reason],
				  stats->delivery_reason_count[3][reason],
				  stats->delivery_reason_count[4][reason],
				  stats->delivery_reason_count[5][reason],
				  delivery_count_per_reason[reason]);
		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
	}
	mgmt_rx_reo_alert("\t%17s|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu\n\n",
			  "", delivery_count_per_link[0],
			  delivery_count_per_link[1],
			  delivery_count_per_link[2],
			  delivery_count_per_link[3],
			  delivery_count_per_link[4],
			  delivery_count_per_link[5],
			  total_delivery_count);

	/* Table 3: delivery counts broken down by reorder context */
	mgmt_rx_reo_alert("\t3) Delivery context related stats");
	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->delivery_context_count[link_id][0],
				  stats->delivery_context_count[link_id][1],
				  stats->delivery_context_count[link_id][2],
				  delivery_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  delivery_count_per_context[0],
			  delivery_count_per_context[1],
			  delivery_count_per_context[2],
			  total_delivery_count);

	mgmt_rx_reo_alert("\t4) Misc stats:");
	mgmt_rx_reo_alert("\t\tEgress list overflow count = %llu\n\n",
			  reo_ctx->egress_list.reo_list.overflow_count);

	status = mgmt_rx_reo_debug_print_scheduler_stats(reo_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print scheduler stats");
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
2172 
2173 /**
2174  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
2175  * frame exiting the reorder module. Logging is done before attempting the frame
2176  * delivery to upper layers.
2177  * @reo_ctx: management rx reorder context
2178  * @entry: Pointer to reorder list entry
2179  *
2180  * Return: QDF_STATUS of operation
2181  */
2182 static QDF_STATUS
mgmt_rx_reo_log_egress_frame_before_delivery(struct mgmt_rx_reo_context * reo_ctx,struct mgmt_rx_reo_list_entry * entry)2183 mgmt_rx_reo_log_egress_frame_before_delivery(
2184 					struct mgmt_rx_reo_context *reo_ctx,
2185 					struct mgmt_rx_reo_list_entry *entry)
2186 {
2187 	struct reo_egress_debug_info *egress_frame_debug_info;
2188 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
2189 	struct reo_egress_frame_stats *stats;
2190 	uint8_t link_id;
2191 
2192 	if (!reo_ctx || !entry)
2193 		return QDF_STATUS_E_NULL_VALUE;
2194 
2195 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
2196 
2197 	stats = &egress_frame_debug_info->stats;
2198 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2199 	stats->delivery_attempts_count[link_id]++;
2200 	if (entry->is_premature_delivery)
2201 		stats->premature_delivery_count[link_id]++;
2202 
2203 	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
2204 						(egress_frame_debug_info))
2205 		return QDF_STATUS_SUCCESS;
2206 
2207 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
2208 			[egress_frame_debug_info->next_index];
2209 
2210 	cur_frame_debug_info->link_id = link_id;
2211 	cur_frame_debug_info->mgmt_pkt_ctr =
2212 				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
2213 	cur_frame_debug_info->global_timestamp =
2214 				mgmt_rx_reo_get_global_ts(entry->rx_params);
2215 	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
2216 	cur_frame_debug_info->final_wait_count = entry->wait_count;
2217 	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
2218 		     entry->shared_snapshots,
2219 		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
2220 			     sizeof(entry->shared_snapshots)));
2221 	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
2222 		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
2223 			     sizeof(entry->host_snapshot)));
2224 	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
2225 	cur_frame_debug_info->ingress_list_insertion_ts =
2226 					entry->ingress_list_insertion_ts;
2227 	cur_frame_debug_info->ingress_list_removal_ts =
2228 					entry->ingress_list_removal_ts;
2229 	cur_frame_debug_info->egress_list_insertion_ts =
2230 					entry->egress_list_insertion_ts;
2231 	cur_frame_debug_info->egress_list_removal_ts =
2232 					entry->egress_list_removal_ts;
2233 	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
2234 	cur_frame_debug_info->egress_list_size = entry->egress_list_size;
2235 	cur_frame_debug_info->first_scheduled_ts = entry->first_scheduled_ts;
2236 	cur_frame_debug_info->last_scheduled_ts = entry->last_scheduled_ts;
2237 	cur_frame_debug_info->scheduled_count =
2238 				qdf_atomic_read(&entry->scheduled_count);
2239 	cur_frame_debug_info->ctx_info = entry->ctx_info;
2240 	cur_frame_debug_info->release_reason = entry->release_reason;
2241 	cur_frame_debug_info->is_premature_delivery =
2242 						entry->is_premature_delivery;
2243 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
2244 
2245 	return QDF_STATUS_SUCCESS;
2246 }
2247 
2248 /**
2249  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
2250  * frame exiting the reorder module. Logging is done after attempting the frame
2251  * delivery to upper layer.
2252  * @reo_ctx: management rx reorder context
2253  * @entry: Pointer to reorder list entry
2254  * @link_id: multi-link link ID
2255  *
2256  * Return: QDF_STATUS of operation
2257  */
2258 static QDF_STATUS
mgmt_rx_reo_log_egress_frame_after_delivery(struct mgmt_rx_reo_context * reo_ctx,struct mgmt_rx_reo_list_entry * entry,uint8_t link_id)2259 mgmt_rx_reo_log_egress_frame_after_delivery(
2260 					struct mgmt_rx_reo_context *reo_ctx,
2261 					struct mgmt_rx_reo_list_entry *entry,
2262 					uint8_t link_id)
2263 {
2264 	struct reo_egress_debug_info *egress_frame_debug_info;
2265 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
2266 	struct reo_egress_frame_stats *stats;
2267 	uint8_t context;
2268 
2269 	if (!reo_ctx || !entry)
2270 		return QDF_STATUS_E_NULL_VALUE;
2271 
2272 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
2273 	context = entry->ctx_info.context;
2274 	if (context >= MGMT_RX_REO_CONTEXT_MAX)
2275 		return QDF_STATUS_E_INVAL;
2276 
2277 	stats = &egress_frame_debug_info->stats;
2278 	if (entry->is_delivered) {
2279 		uint8_t release_reason = entry->release_reason;
2280 
2281 		stats->delivery_reason_count[link_id][release_reason]++;
2282 		stats->delivery_context_count[link_id][context]++;
2283 		stats->delivery_success_count[link_id]++;
2284 	}
2285 
2286 	if (entry->is_dropped)
2287 		stats->drop_count[link_id]++;
2288 
2289 	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
2290 						(egress_frame_debug_info))
2291 		return QDF_STATUS_SUCCESS;
2292 
2293 	cur_frame_debug_info = &egress_frame_debug_info->frame_list
2294 			[egress_frame_debug_info->next_index];
2295 
2296 	cur_frame_debug_info->is_delivered = entry->is_delivered;
2297 	cur_frame_debug_info->is_dropped = entry->is_dropped;
2298 	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
2299 					cur_frame_debug_info->egress_timestamp;
2300 
2301 	egress_frame_debug_info->next_index++;
2302 	egress_frame_debug_info->next_index %=
2303 				egress_frame_debug_info->frame_list_size;
2304 	if (egress_frame_debug_info->next_index == 0)
2305 		egress_frame_debug_info->wrap_aroud = true;
2306 
2307 	return QDF_STATUS_SUCCESS;
2308 }
2309 
/**
 * mgmt_rx_reo_debug_print_egress_frame_info() - Print the debug information
 * about the latest frames leaving the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed. If @num_frames is 0, then debug information about all the frames
 * in the ring buffer will be  printed.
 *
 * Walks the egress debug ring buffer in chronological order (oldest first)
 * and prints one row per frame, followed by its wait counts and the
 * per-link snapshot tuples in (valid, mgmt_pkt_ctr, global_timestamp)
 * format.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
					  uint16_t num_frames)
{
	struct reo_egress_debug_info *egress_frame_debug_info;
	int start_index;
	uint16_t index;
	uint16_t entry;
	uint16_t num_valid_entries;
	uint16_t num_entries_to_print;
	char *boarder;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;

	/*
	 * Before the first wrap only slots [0, next_index) hold valid data;
	 * after a wrap every slot does.
	 */
	if (egress_frame_debug_info->wrap_aroud)
		num_valid_entries = egress_frame_debug_info->frame_list_size;
	else
		num_valid_entries = egress_frame_debug_info->next_index;

	if (num_frames == 0) {
		/* Print everything: start from the oldest valid slot */
		num_entries_to_print = num_valid_entries;

		if (egress_frame_debug_info->wrap_aroud)
			start_index = egress_frame_debug_info->next_index;
		else
			start_index = 0;
	} else {
		/*
		 * Print only the most recent entries: walk back from
		 * next_index, with modulo arithmetic to handle wrap-around.
		 */
		num_entries_to_print = qdf_min(num_frames, num_valid_entries);

		start_index = (egress_frame_debug_info->next_index -
			       num_entries_to_print +
			       egress_frame_debug_info->frame_list_size)
			      % egress_frame_debug_info->frame_list_size;
	}

	mgmt_rx_reo_alert_no_fl("Egress Frame Info:-");
	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
				num_frames,
				egress_frame_debug_info->wrap_aroud,
				egress_frame_debug_info->next_index);
	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
				start_index, num_entries_to_print);

	if (!num_entries_to_print)
		return QDF_STATUS_SUCCESS;

	boarder = egress_frame_debug_info->boarder;

	mgmt_rx_reo_alert_no_fl("%s", boarder);
	mgmt_rx_reo_alert_no_fl("|%3s|%5s|%4s|%5s|%10s|%11s|%11s|%11s|%11s|%11s|%11s|%5s|%7s|%7s|%5s|%4s|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
				"No.", "CPU", "Link", "SeqNo", "Global ts",
				"Ingress ts", "Ing Insert",
				"Ing Remove", "Eg Insert", "Eg Remove",
				"Egress ts", "E Dur", "I W Dur", "E W Dur",
				"Flags", "Rea.", "Final wait count",
				"Initial wait count", "Snapshot : link 0",
				"Snapshot : link 1", "Snapshot : link 2",
				"Snapshot : link 3", "Snapshot : link 4",
				"Snapshot : link 5");
	mgmt_rx_reo_alert_no_fl("%s", boarder);

	index = start_index;
	for (entry = 0; entry < num_entries_to_print; entry++) {
		struct reo_egress_debug_frame_info *info;
		char flags[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {0};
		char final_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
		char initial_wait_count[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {0};
		char flag_premature_delivery = ' ';
		char flag_error = ' ';
		uint8_t link;

		info = &reo_ctx->egress_frame_debug_info.frame_list[index];

		/* Flags column: 'E' = delivery failed, 'P' = premature */
		if (!info->is_delivered)
			flag_error = 'E';

		if (info->is_premature_delivery)
			flag_premature_delivery = 'P';

		snprintf(flags, sizeof(flags), "%c %c", flag_error,
			 flag_premature_delivery);
		/*
		 * NOTE(review): the wait-count and snapshot strings hard-code
		 * six per-link fields (0..5); revisit if MAX_MLO_LINKS ever
		 * differs from 6.
		 */
		snprintf(initial_wait_count, sizeof(initial_wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->initial_wait_count.total_count,
			 info->initial_wait_count.per_link_count[0],
			 info->initial_wait_count.per_link_count[1],
			 info->initial_wait_count.per_link_count[2],
			 info->initial_wait_count.per_link_count[3],
			 info->initial_wait_count.per_link_count[4],
			 info->initial_wait_count.per_link_count[5]);
		snprintf(final_wait_count, sizeof(final_wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->final_wait_count.total_count,
			 info->final_wait_count.per_link_count[0],
			 info->final_wait_count.per_link_count[1],
			 info->final_wait_count.per_link_count[2],
			 info->final_wait_count.per_link_count[3],
			 info->final_wait_count.per_link_count[4],
			 info->final_wait_count.per_link_count[5]);

		for (link = 0; link < MAX_MLO_LINKS; link++) {
			char mac_hw[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_consumed[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_forwarded[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char host[MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
			struct mgmt_rx_reo_snapshot_params *host_ss;

			mac_hw_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
			fw_consumed_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
			fw_forwarded_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
			host_ss = &info->host_snapshot[link];

			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
			snprintf(fw_consumed, sizeof(fw_consumed),
				 "(%1u, %5u, %10u)",
				 fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);
			snprintf(fw_forwarded, sizeof(fw_forwarded),
				 "(%1u, %5u, %10u)",
				 fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
				 host_ss->valid,
				 host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);
			snprintf(snapshots[link], sizeof(snapshots[link]),
				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
				 fw_forwarded, host);
		}

		mgmt_rx_reo_alert_no_fl("|%3u|%5d|%4u|%5u|%10u|%11llu|%11llu|%11llu|%11llu|%11llu|%11llu|%5llu|%7llu|%7llu|%5s|%4x|%69s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
					entry, info->cpu_id, info->link_id,
					info->mgmt_pkt_ctr,
					info->global_timestamp,
					info->ingress_timestamp,
					info->ingress_list_insertion_ts,
					info->ingress_list_removal_ts,
					info->egress_list_insertion_ts,
					info->egress_list_removal_ts,
					info->egress_timestamp,
					info->egress_duration,
					info->ingress_list_removal_ts -
					info->ingress_list_insertion_ts,
					info->egress_list_removal_ts -
					info->egress_list_insertion_ts,
					flags, info->release_reason,
					final_wait_count, initial_wait_count,
					snapshots[0], snapshots[1],
					snapshots[2], snapshots[3],
					snapshots[4], snapshots[5]);
		mgmt_rx_reo_alert_no_fl("%s", boarder);

		index++;
		index %= egress_frame_debug_info->frame_list_size;
	}

	return QDF_STATUS_SUCCESS;
}
2492 #else
/**
 * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
 * related to frames going out of the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going out of the management
 * Rx reorder module.
 *
 * No-op stub used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is not compiled in.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	return QDF_STATUS_SUCCESS;
}
2508 
2509 /**
2510  * mgmt_rx_reo_log_egress_frame_before_delivery() - Log the information about a
2511  * frame exiting the reorder module. Logging is done before attempting the frame
2512  * delivery to upper layers.
2513  * @reo_ctx: management rx reorder context
2514  * @entry: Pointer to reorder list entry
2515  *
2516  * Return: QDF_STATUS of operation
2517  */
2518 static QDF_STATUS
mgmt_rx_reo_log_egress_frame_before_delivery(struct mgmt_rx_reo_context * reo_ctx,struct mgmt_rx_reo_list_entry * entry)2519 mgmt_rx_reo_log_egress_frame_before_delivery(
2520 					struct mgmt_rx_reo_context *reo_ctx,
2521 					struct mgmt_rx_reo_list_entry *entry)
2522 {
2523 	return QDF_STATUS_SUCCESS;
2524 }
2525 
2526 /**
2527  * mgmt_rx_reo_log_egress_frame_after_delivery() - Log the information about a
2528  * frame exiting the reorder module. Logging is done after attempting the frame
2529  * delivery to upper layer.
2530  * @reo_ctx: management rx reorder context
2531  * @entry: Pointer to reorder list entry
2532  * @link_id: multi-link link ID
2533  *
2534  * Return: QDF_STATUS of operation
2535  */
2536 static QDF_STATUS
mgmt_rx_reo_log_egress_frame_after_delivery(struct mgmt_rx_reo_context * reo_ctx,struct mgmt_rx_reo_list_entry * entry,uint8_t link_id)2537 mgmt_rx_reo_log_egress_frame_after_delivery(
2538 					struct mgmt_rx_reo_context *reo_ctx,
2539 					struct mgmt_rx_reo_list_entry *entry,
2540 					uint8_t link_id)
2541 {
2542 	return QDF_STATUS_SUCCESS;
2543 }
2544 
2545 /**
2546  * mgmt_rx_reo_debug_print_egress_frame_info() - Print debug information about
2547  * the latest frames leaving the reorder module
2548  * @reo_ctx: management rx reorder context
2549  *
2550  * Return: QDF_STATUS of operation
2551  */
2552 static QDF_STATUS
mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context * reo_ctx)2553 mgmt_rx_reo_debug_print_egress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
2554 {
2555 	return QDF_STATUS_SUCCESS;
2556 }
2557 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
2558 
2559 /**
2560  * mgmt_rx_reo_list_entry_get_release_reason() - Helper API to get the reason
2561  * for releasing the reorder list entry to upper layer.
2562  * reorder list.
2563  * @entry: List entry
2564  *
2565  * This API expects the caller to acquire the spin lock protecting the reorder
2566  * list.
2567  *
2568  * Return: Reason for releasing the frame.
2569  */
2570 static uint8_t
mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry * entry)2571 mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
2572 {
2573 	uint8_t reason = 0;
2574 
2575 	if (!entry)
2576 		return 0;
2577 
2578 	if (!LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry))
2579 		reason |= RELEASE_REASON_ZERO_WAIT_COUNT;
2580 
2581 	if (LIST_ENTRY_IS_AGED_OUT(entry))
2582 		reason |= RELEASE_REASON_AGED_OUT;
2583 
2584 	if (LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry))
2585 		reason |= RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME;
2586 
2587 	if (LIST_ENTRY_IS_REMOVED_DUE_TO_INGRESS_LIST_OVERFLOW(entry))
2588 		reason |= RELEASE_REASON_INGRESS_LIST_OVERFLOW;
2589 
2590 	if (LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry))
2591 		reason |= RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES;
2592 
2593 	if (LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(entry))
2594 		reason |= RELEASE_REASON_EGRESS_LIST_OVERFLOW;
2595 
2596 	return reason;
2597 }
2598 
2599 /**
2600  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
2601  * @reo_context: Pointer to reorder context
2602  * @entry: List entry
2603  * @deliver: Indicates whether this entry has to be delivered to upper layers
2604  * or dropped in the reo layer itself.
2605  *
2606  * API to send the frame to the upper layer. This API has to be called only
2607  * for entries which can be released to upper layer. It is the caller's
2608  * responsibility to ensure that entry can be released (by using API
2609  * mgmt_rx_reo_is_entry_ready_to_send_up). This API is called after
2610  * acquiring the lock which serializes the frame delivery to the upper layers.
2611  *
2612  * Return: QDF_STATUS
2613  */
2614 static QDF_STATUS
mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_context * reo_context,struct mgmt_rx_reo_list_entry * entry,bool deliver)2615 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_context *reo_context,
2616 			       struct mgmt_rx_reo_list_entry *entry,
2617 			       bool deliver)
2618 {
2619 	uint8_t release_reason;
2620 	uint8_t link_id;
2621 	uint32_t entry_global_ts;
2622 	QDF_STATUS status;
2623 	QDF_STATUS temp;
2624 
2625 	if (!reo_context) {
2626 		mgmt_rx_reo_err("Reo context is null");
2627 		return QDF_STATUS_E_NULL_VALUE;
2628 	}
2629 
2630 	if (!entry) {
2631 		mgmt_rx_reo_err("Entry is null");
2632 		return QDF_STATUS_E_NULL_VALUE;
2633 	}
2634 
2635 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2636 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
2637 
2638 	release_reason = mgmt_rx_reo_list_entry_get_release_reason(entry);
2639 	if (!release_reason) {
2640 		mgmt_rx_reo_err("Release reason is zero");
2641 		return QDF_STATUS_E_INVAL;
2642 	}
2643 
2644 	entry->is_delivered = false;
2645 	entry->is_dropped = false;
2646 	entry->is_premature_delivery = false;
2647 	entry->release_reason = release_reason;
2648 
2649 	if (mgmt_rx_reo_is_potential_premature_delivery(release_reason)) {
2650 		entry->is_premature_delivery = true;
2651 		status = mgmt_rx_reo_handle_potential_premature_delivery(
2652 						reo_context, entry_global_ts);
2653 		if (QDF_IS_STATUS_ERROR(status))
2654 			goto exit;
2655 	}
2656 
2657 	status = mgmt_rx_reo_log_egress_frame_before_delivery(reo_context,
2658 							      entry);
2659 	if (QDF_IS_STATUS_ERROR(status))
2660 		goto exit;
2661 
2662 	if (deliver) {
2663 		status = wlan_mgmt_txrx_process_rx_frame(entry->pdev,
2664 							 entry->nbuf,
2665 							 entry->rx_params);
2666 		/* Above call frees nbuf and rx_params, make them null */
2667 		entry->nbuf = NULL;
2668 		entry->rx_params = NULL;
2669 
2670 		if (QDF_IS_STATUS_ERROR(status))
2671 			goto exit_log;
2672 
2673 		entry->is_delivered = true;
2674 	} else {
2675 		free_mgmt_rx_event_params(entry->rx_params);
2676 		qdf_nbuf_free(entry->nbuf);
2677 		entry->is_dropped = true;
2678 	}
2679 
2680 	status = QDF_STATUS_SUCCESS;
2681 
2682 exit_log:
2683 	temp = mgmt_rx_reo_log_egress_frame_after_delivery(reo_context, entry,
2684 							   link_id);
2685 	if (QDF_IS_STATUS_ERROR(temp))
2686 		status = temp;
2687 exit:
2688 	/**
2689 	 * Release the reference taken when the entry is inserted into
2690 	 * the reorder list
2691 	 */
2692 	wlan_objmgr_pdev_release_ref(entry->pdev, WLAN_MGMT_RX_REO_ID);
2693 
2694 	return status;
2695 }
2696 
2697 /**
2698  * mgmt_rx_reo_is_entry_ready_to_send_up() - API to check whether the
2699  * list entry can be send to upper layers.
2700  * @entry: List entry
2701  *
2702  * Return: QDF_STATUS
2703  */
2704 static bool
mgmt_rx_reo_is_entry_ready_to_send_up(struct mgmt_rx_reo_list_entry * entry)2705 mgmt_rx_reo_is_entry_ready_to_send_up(struct mgmt_rx_reo_list_entry *entry)
2706 {
2707 	if (!entry) {
2708 		mgmt_rx_reo_err("Entry is null");
2709 		return false;
2710 	}
2711 
2712 	return LIST_ENTRY_IS_REMOVED_DUE_TO_INGRESS_LIST_OVERFLOW(entry) ||
2713 	       LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(entry) ||
2714 	       !LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry) ||
2715 	       LIST_ENTRY_IS_AGED_OUT(entry) ||
2716 	       LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry) ||
2717 	       LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry);
2718 }
2719 
2720 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
2721 /**
2722  * mgmt_rx_reo_scheduler_debug_info_enabled() - API to check whether scheduler
2723  * debug feaure is enabled
2724  * @scheduler_debug_info: Pointer to scheduler debug info object
2725  *
2726  * Return: true or false
2727  */
2728 static bool
mgmt_rx_reo_scheduler_debug_info_enabled(struct reo_scheduler_debug_info * scheduler_debug_info)2729 mgmt_rx_reo_scheduler_debug_info_enabled
2730 			(struct reo_scheduler_debug_info *scheduler_debug_info)
2731 {
2732 	return scheduler_debug_info->frame_list_size;
2733 }
2734 
2735 /**
2736  * mgmt_rx_reo_log_scheduler_debug_info() - Log the information about a
2737  * frame getting scheduled by mgmt rx reo scheduler
2738  * @reo_ctx: management rx reorder context
2739  * @entry: Pointer to reorder list entry
2740  * @reschedule: Indicates rescheduling
2741  *
2742  * Return: QDF_STATUS of operation
2743  */
static QDF_STATUS
mgmt_rx_reo_log_scheduler_debug_info(struct mgmt_rx_reo_context *reo_ctx,
				     struct mgmt_rx_reo_list_entry *entry,
				     bool reschedule)
{
	struct reo_scheduler_debug_info *scheduler_debug_info;
	struct reo_scheduler_debug_frame_info *cur_frame_debug_info;
	struct reo_scheduler_stats *stats;
	uint8_t link_id;

	if (!reo_ctx || !entry)
		return QDF_STATUS_E_NULL_VALUE;

	scheduler_debug_info = &reo_ctx->scheduler_debug_info;

	/* Per-link/per-context counters are updated unconditionally ... */
	stats = &scheduler_debug_info->stats;
	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
	stats->scheduled_count[link_id][entry->ctx_info.context]++;
	if (reschedule)
		stats->rescheduled_count[link_id][entry->ctx_info.context]++;

	/* ... but the per-frame ring buffer is filled only when enabled */
	if (!mgmt_rx_reo_scheduler_debug_info_enabled(scheduler_debug_info))
		return QDF_STATUS_SUCCESS;

	cur_frame_debug_info = &scheduler_debug_info->frame_list
			[scheduler_debug_info->next_index];

	/* Snapshot the frame's identity, wait counts and timeline */
	cur_frame_debug_info->link_id = link_id;
	cur_frame_debug_info->mgmt_pkt_ctr =
				mgmt_rx_reo_get_pkt_counter(entry->rx_params);
	cur_frame_debug_info->global_timestamp =
				mgmt_rx_reo_get_global_ts(entry->rx_params);
	cur_frame_debug_info->initial_wait_count = entry->initial_wait_count;
	cur_frame_debug_info->final_wait_count = entry->wait_count;
	/* Copy the smaller of the two sizes to guard against any mismatch
	 * between entry and debug-info snapshot array definitions.
	 */
	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
		     entry->shared_snapshots,
		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
			     sizeof(entry->shared_snapshots)));
	qdf_mem_copy(cur_frame_debug_info->host_snapshot, entry->host_snapshot,
		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
			     sizeof(entry->host_snapshot)));
	cur_frame_debug_info->ingress_timestamp = entry->ingress_timestamp;
	cur_frame_debug_info->ingress_list_insertion_ts =
					entry->ingress_list_insertion_ts;
	cur_frame_debug_info->ingress_list_removal_ts =
					entry->ingress_list_removal_ts;
	cur_frame_debug_info->egress_list_insertion_ts =
					entry->egress_list_insertion_ts;
	cur_frame_debug_info->scheduled_ts = qdf_get_log_timestamp();
	cur_frame_debug_info->first_scheduled_ts = entry->first_scheduled_ts;
	cur_frame_debug_info->last_scheduled_ts = entry->last_scheduled_ts;
	cur_frame_debug_info->scheduled_count =
				qdf_atomic_read(&entry->scheduled_count);
	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
	cur_frame_debug_info->ctx_info = entry->ctx_info;

	/* Advance the ring index; mark wrap-around once the index rolls
	 * back to 0. ("wrap_aroud" is the field's spelling as declared in
	 * the struct definition.)
	 */
	scheduler_debug_info->next_index++;
	scheduler_debug_info->next_index %=
				scheduler_debug_info->frame_list_size;
	if (scheduler_debug_info->next_index == 0)
		scheduler_debug_info->wrap_aroud = true;

	return QDF_STATUS_SUCCESS;
}
2808 #else
2809 /**
2810  * mgmt_rx_reo_log_scheduler_debug_info() - Log the information about a
2811  * frame getting scheduled by mgmt rx reo scheduler
2812  * @reo_ctx: management rx reorder context
2813  * @entry: Pointer to reorder list entry
2814  * @reschedule: Indicates rescheduling
2815  *
2816  * Return: QDF_STATUS of operation
2817  */
static inline QDF_STATUS
mgmt_rx_reo_log_scheduler_debug_info(struct mgmt_rx_reo_context *reo_ctx,
				     struct mgmt_rx_reo_list_entry *entry,
				     bool reschedule)
{
	/* Scheduler debug support compiled out; logging is a no-op. */
	return QDF_STATUS_SUCCESS;
}
2825 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
2826 
2827 /**
2828  * mgmt_rx_reo_defer_delivery() - Helper API to check whether a management
2829  * frame can be delivered in the current context or it has to be scheduled
2830  * for delivery in a different context
2831  * @entry: List entry
2832  * @link_bitmap: Link bitmap
2833  *
2834  * Return: true if frame can't be delivered in the current context and its
2835  * delivery has to be done in a different context
2836  */
2837 bool
mgmt_rx_reo_defer_delivery(struct mgmt_rx_reo_list_entry * entry,uint32_t link_bitmap)2838 mgmt_rx_reo_defer_delivery(struct mgmt_rx_reo_list_entry *entry,
2839 			   uint32_t link_bitmap)
2840 {
2841 	uint8_t link_id;
2842 	uint8_t mlo_grp_id;
2843 	struct wlan_objmgr_pdev *pdev;
2844 
2845 	if (!entry) {
2846 		mgmt_rx_reo_err("Entry is null");
2847 		return true;
2848 	}
2849 
2850 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2851 	mlo_grp_id = entry->rx_params->reo_params->mlo_grp_id;
2852 
2853 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, mlo_grp_id,
2854 					      WLAN_MGMT_RX_REO_ID);
2855 	if (!pdev) {
2856 		mgmt_rx_reo_err("pdev for link %u, group %u is null",
2857 				link_id, mlo_grp_id);
2858 		return false;
2859 	}
2860 
2861 	if (!wlan_mgmt_rx_reo_is_scheduler_enabled_at_pdev(pdev)) {
2862 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2863 		return false;
2864 	}
2865 
2866 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2867 
2868 	return !(link_bitmap & (1 << link_id));
2869 }
2870 
2871 /**
2872  * mgmt_rx_reo_schedule_delivery() - Helper API to schedule the delivery of
2873  * a management frames.
2874  * @reo_context: Pointer to reorder context
2875  * @entry: List entry corresponding to the frame which has to be scheduled
2876  * for delivery
2877  *
2878  * Return: QDF_STATUS
2879  */
2880 QDF_STATUS
mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context * reo_context,struct mgmt_rx_reo_list_entry * entry)2881 mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context *reo_context,
2882 			      struct mgmt_rx_reo_list_entry *entry)
2883 {
2884 	int scheduled_count;
2885 	int8_t link_id;
2886 	uint8_t mlo_grp_id;
2887 	struct wlan_objmgr_pdev *pdev;
2888 	QDF_STATUS status;
2889 	bool reschedule;
2890 
2891 	if (!reo_context) {
2892 		mgmt_rx_reo_err("Reo context is null");
2893 		return QDF_STATUS_E_NULL_VALUE;
2894 	}
2895 
2896 	if (!entry) {
2897 		mgmt_rx_reo_err("List entry is null");
2898 		return QDF_STATUS_E_NULL_VALUE;
2899 	}
2900 
2901 	scheduled_count = qdf_atomic_inc_return(&entry->scheduled_count);
2902 
2903 	reschedule = (scheduled_count > 1);
2904 	status = mgmt_rx_reo_log_scheduler_debug_info(reo_context, entry,
2905 						      reschedule);
2906 	if (QDF_IS_STATUS_ERROR(status)) {
2907 		mgmt_rx_reo_err("Failed to log scheduler debug info");
2908 		return status;
2909 	}
2910 
2911 	if (reschedule) {
2912 		entry->last_scheduled_ts = qdf_get_log_timestamp();
2913 		return QDF_STATUS_SUCCESS;
2914 	}
2915 
2916 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
2917 	mlo_grp_id = entry->rx_params->reo_params->mlo_grp_id;
2918 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, mlo_grp_id,
2919 					      WLAN_MGMT_RX_REO_ID);
2920 	if (!pdev) {
2921 		mgmt_rx_reo_err("pdev for link %u, group %u is null",
2922 				link_id, mlo_grp_id);
2923 		return QDF_STATUS_E_NULL_VALUE;
2924 	}
2925 
2926 	entry->first_scheduled_ts = qdf_get_log_timestamp();
2927 	status = tgt_mgmt_rx_reo_schedule_delivery(wlan_pdev_get_psoc(pdev));
2928 	if (QDF_IS_STATUS_ERROR(status)) {
2929 		mgmt_rx_reo_err("Failed to schedule for link %u, group %u",
2930 				link_id, mlo_grp_id);
2931 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2932 		return status;
2933 	}
2934 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
2935 
2936 	return QDF_STATUS_SUCCESS;
2937 }
2938 
2939 /**
2940  * mgmt_rx_reo_release_egress_list_entries() - Release entries from the
2941  * egress list
2942  * @reo_context: Pointer to management Rx reorder context
2943  * @link_bitmap: Bitmap of links for which frames can be released in the current
2944  * context
2945  * @ctx: Current execution context info
2946  *
2947  * This API releases the entries from the egress list based on the following
2948  * conditions.
2949  *   a) Entries with total wait count equal to 0
2950  *   b) Entries which are timed out or entries with global time stamp <= global
2951  *      time stamp of the latest frame which is timed out. We can only release
2952  *      the entries in the increasing order of the global time stamp.
2953  *      So all the entries with global time stamp <= global time stamp of the
2954  *      latest timed out frame has to be released.
2955  *
2956  * Return: QDF_STATUS
2957  */
static QDF_STATUS
mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context,
					uint32_t link_bitmap,
					struct mgmt_rx_reo_context_info *ctx)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_egress_list *egress_list;
	struct mgmt_rx_reo_list *reo_egress_list;
	qdf_timer_t *egress_inactivity_timer;

	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	egress_list = &reo_context->egress_list;
	reo_egress_list = &egress_list->reo_list;
	egress_inactivity_timer = &egress_list->egress_inactivity_timer;

	/* Serialize frame delivery to the upper layers across contexts */
	qdf_spin_lock(&reo_context->frame_release_lock);

	/* Pop and release head entries one by one until the head is either
	 * absent or not yet ready for delivery.
	 */
	while (1) {
		struct mgmt_rx_reo_list_entry *first_entry;
		/* TODO yield if release_count > THRESHOLD */
		/* NOTE(review): release_count is declared inside the loop and
		 * thus reset to 0 on every iteration; the TODO threshold
		 * check above cannot work until it is hoisted out.
		 */
		uint16_t release_count = 0;
		uint32_t first_entry_ts;
		struct mgmt_rx_event_params *rx_params;
		struct mgmt_rx_reo_frame_info *last_released_frame =
					&reo_egress_list->last_released_frame;
		uint32_t last_released_frame_ts;
		bool ready;
		bool defer;
		bool overflow;

		qdf_spin_lock_bh(&reo_egress_list->list_lock);

		first_entry = qdf_list_first_entry_or_null(
					&reo_egress_list->list,
					struct mgmt_rx_reo_list_entry, node);
		if (!first_entry) {
			/* Egress list fully drained */
			status = QDF_STATUS_SUCCESS;
			goto exit_unlock_egress_list_lock;
		}

		ready = mgmt_rx_reo_is_entry_ready_to_send_up(first_entry);
		if (!ready) {
			status = QDF_STATUS_E_FAILURE;
			goto exit_unlock_egress_list_lock;
		}

		first_entry->ctx_info = *ctx;
		/* Entries that can't be delivered in this context are handed
		 * to the scheduler — unless they are overflow victims, which
		 * are dropped below instead of being rescheduled.
		 */
		defer = mgmt_rx_reo_defer_delivery(first_entry, link_bitmap);
		overflow =
		 LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(first_entry);
		if (defer && !overflow) {
			status = mgmt_rx_reo_schedule_delivery(reo_context,
							       first_entry);
			if (QDF_IS_STATUS_ERROR(status))
				mgmt_rx_reo_err("Failed to schedule delivery");
			goto exit_unlock_egress_list_lock;
		}

		first_entry->egress_list_size =
					qdf_list_size(&reo_egress_list->list);
		status = qdf_list_remove_node(&reo_egress_list->list,
					      &first_entry->node);
		if (QDF_IS_STATUS_ERROR(status)) {
			status = QDF_STATUS_E_FAILURE;
			goto exit_unlock_egress_list_lock;
		}
		first_entry->egress_list_removal_ts = qdf_get_log_timestamp();

		/**
		 * Last released frame global time stamp is invalid means that
		 * current frame is the first frame to be released to the
		 * upper layer from the egress list. Blindly update the last
		 * released frame global time stamp to the current frame's
		 * global time stamp and set the valid to true.
		 * If the last released frame global time stamp is valid and
		 * current frame's global time stamp is >= last released frame
		 * global time stamp, deliver the current frame to upper layer
		 * and update the last released frame global time stamp.
		 */
		rx_params = first_entry->rx_params;
		first_entry_ts = mgmt_rx_reo_get_global_ts(rx_params);
		last_released_frame_ts =
			last_released_frame->reo_params.global_timestamp;

		if (!last_released_frame->valid ||
		    mgmt_rx_reo_compare_global_timestamps_gte(
			first_entry_ts, last_released_frame_ts)) {
			/* Restart the inactivity timer on every release */
			qdf_timer_sync_cancel(egress_inactivity_timer);

			last_released_frame->reo_params =
						*rx_params->reo_params;
			last_released_frame->valid = true;

			qdf_timer_mod(egress_inactivity_timer,
				      MGMT_RX_REO_EGRESS_INACTIVITY_TIMEOUT);
		} else {
			/**
			 * This should never happen. All the frames older than
			 * the last frame released from the reorder list will be
			 * discarded at the entry to reorder algorithm itself.
			 */
			qdf_assert_always(first_entry->is_parallel_rx);
		}

		qdf_spin_unlock_bh(&reo_egress_list->list_lock);

		/* Deliver unless this is a deferred overflow victim, in
		 * which case the frame is dropped inside send_up.
		 */
		status = mgmt_rx_reo_list_entry_send_up(reo_context,
							first_entry,
							!defer || !overflow);
		if (QDF_IS_STATUS_ERROR(status)) {
			status = QDF_STATUS_E_FAILURE;
			qdf_mem_free(first_entry);
			goto exit_unlock_frame_release_lock;
		}

		qdf_mem_free(first_entry);
		release_count++;
	}

	status = QDF_STATUS_SUCCESS;
	goto exit_unlock_frame_release_lock;

exit_unlock_egress_list_lock:
	if (qdf_list_size(&reo_egress_list->list) >
	    reo_egress_list->max_list_size)
		mgmt_rx_reo_err("Egress list overflow size =%u, max = %u",
				qdf_list_size(&reo_egress_list->list),
				reo_egress_list->max_list_size);
	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
exit_unlock_frame_release_lock:
	qdf_spin_unlock(&reo_context->frame_release_lock);

	return status;
}
3096 
3097 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
3098 /**
3099  * mgmt_rx_reo_scheduler_cb_stats_inc() - API to increment scheduler_cb_count.
3100  * @link_bitmap: Bitmap of links for which frames can be released in the current
3101  * context
3102  * @reo_context: Pointer to management Rx reorder context
3103  *
3104  * This API increments the scheduler_cb_count of links for which frames can be
3105  * released in the current context
3106  */
static void mgmt_rx_reo_scheduler_cb_stats_inc(uint32_t link_bitmap,
					       struct mgmt_rx_reo_context
					       *reo_context)
{
	struct reo_scheduler_stats *stats;
	uint8_t link;

	stats = &reo_context->scheduler_debug_info.stats;

	/* Bump the callback counter for every link present in the bitmap */
	for (link = 0; link < MAX_MLO_LINKS; link++) {
		if (!(link_bitmap & (1 << link)))
			continue;

		stats->scheduler_cb_count[link]++;
	}
}
3121 #else
static void mgmt_rx_reo_scheduler_cb_stats_inc(uint32_t link_bitmap,
					       struct mgmt_rx_reo_context
					       *reo_context)
{
	/* Scheduler debug support compiled out; stats are a no-op. */
}
3127 #endif
3128 
3129 QDF_STATUS
mgmt_rx_reo_release_frames(uint8_t mlo_grp_id,uint32_t link_bitmap)3130 mgmt_rx_reo_release_frames(uint8_t mlo_grp_id, uint32_t link_bitmap)
3131 {
3132 	struct mgmt_rx_reo_context *reo_context;
3133 	QDF_STATUS ret;
3134 	struct mgmt_rx_reo_context_info ctx_info = {0};
3135 
3136 	if (mlo_grp_id >= WLAN_MAX_MLO_GROUPS) {
3137 		mgmt_rx_reo_err("Invalid mlo grp id");
3138 		return QDF_STATUS_E_INVAL;
3139 	}
3140 
3141 	reo_context = mgmt_rx_reo_get_context(mlo_grp_id);
3142 	if (!reo_context) {
3143 		mgmt_rx_reo_err("Mgmt rx reo context is null");
3144 		return QDF_STATUS_E_NULL_VALUE;
3145 	}
3146 	mgmt_rx_reo_scheduler_cb_stats_inc(link_bitmap, reo_context);
3147 	ctx_info.context = MGMT_RX_REO_CONTEXT_SCHEDULER_CB;
3148 	ctx_info.context_id = qdf_atomic_inc_return(&reo_context->context_id);
3149 	ret = mgmt_rx_reo_release_egress_list_entries(reo_context, link_bitmap,
3150 						      &ctx_info);
3151 	if (QDF_IS_STATUS_ERROR(ret)) {
3152 		mgmt_rx_reo_err("Failure to release frames grp = %u bm = 0x%x",
3153 				mlo_grp_id, link_bitmap);
3154 		return ret;
3155 	}
3156 
3157 	return QDF_STATUS_SUCCESS;
3158 }
3159 
3160 /**
3161  * mgmt_rx_reo_check_sanity_list() - Check the sanity of reorder list
3162  * @reo_list: Pointer to reorder list
3163  *
3164  * Check the sanity of ingress reorder list or egress reorder list.
3165  * Ingress/Egress reorder list entries should be in the non decreasing order
3166  * of global time stamp.
3167  *
3168  * Return: QDF_STATUS
3169  */
static QDF_STATUS
mgmt_rx_reo_check_sanity_list(struct mgmt_rx_reo_list *reo_list)
{
	struct mgmt_rx_reo_list_entry *first;
	struct mgmt_rx_reo_list_entry *cur;
	uint32_t ts_prev;
	uint32_t ts_cur;

	if (!reo_list) {
		mgmt_rx_reo_err("Reo list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* An empty list is trivially in order */
	if (qdf_list_empty(&reo_list->list))
		return QDF_STATUS_SUCCESS;

	first = qdf_list_first_entry_or_null(&reo_list->list,
					     struct mgmt_rx_reo_list_entry,
					     node);
	if (!first) {
		mgmt_rx_reo_err("First entry is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Walk from the second entry onward, verifying the entries are in
	 * non-decreasing order of global time stamp.
	 */
	cur = first;
	ts_prev = mgmt_rx_reo_get_global_ts(first->rx_params);

	qdf_list_for_each_continue(&reo_list->list, cur, node) {
		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);

		if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_cur,
							       ts_prev))
			return QDF_STATUS_E_INVAL;

		ts_prev = ts_cur;
	}

	return QDF_STATUS_SUCCESS;
}
3209 
3210 /**
3211  * mgmt_rx_reo_check_sanity_lists() - Check the sanity of ingress and
3212  * egress reorder lists
3213  * @reo_egress_list: Pointer to egress reorder list
3214  * @reo_ingress_list: Pointer to ingress reorder list
3215  *
3216  * Check the sanity of ingress reorder list and egress reorder list.
3217  * This API does the following sanity checks.
3218  *
3219  * 1. Ingress list entries should be in the non decreasing order of global
3220  *    time stamp.
3221  * 2. Egress list entries should be in the non decreasing order of global
3222  *    time stamp.
3223  * 3. All the entries in egress list should have global time stamp less
3224  *    than or equal to all the entries in ingress list.
3225  *
3226  * Return: QDF_STATUS
3227  */
static QDF_STATUS
mgmt_rx_reo_check_sanity_lists(struct mgmt_rx_reo_list *reo_egress_list,
			       struct mgmt_rx_reo_list *reo_ingress_list)
{
	QDF_STATUS status;
	struct mgmt_rx_reo_list_entry *last_entry_egress_list;
	uint32_t ts_egress_last_entry;
	struct mgmt_rx_reo_list_entry *first_entry_ingress_list;
	uint32_t ts_ingress_first_entry;

	if (!reo_egress_list) {
		mgmt_rx_reo_err("Egress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!reo_ingress_list) {
		mgmt_rx_reo_err("Ingress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Check 1: egress list internally ordered by global time stamp */
	status = mgmt_rx_reo_check_sanity_list(reo_egress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of egress list failed");
		return status;
	}

	/* Check 2: ingress list internally ordered by global time stamp */
	status = mgmt_rx_reo_check_sanity_list(reo_ingress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of ingress list failed");
		return status;
	}

	/* Check 3 needs one entry in each list; trivially true otherwise */
	if (qdf_list_empty(&reo_egress_list->list) ||
	    qdf_list_empty(&reo_ingress_list->list))
		return QDF_STATUS_SUCCESS;

	/* Check 3: since both lists are individually ordered, comparing the
	 * newest egress entry against the oldest ingress entry is enough to
	 * prove every egress time stamp <= every ingress time stamp.
	 */
	last_entry_egress_list =
		qdf_list_last_entry(&reo_egress_list->list,
				    struct mgmt_rx_reo_list_entry, node);
	ts_egress_last_entry =
		mgmt_rx_reo_get_global_ts(last_entry_egress_list->rx_params);

	first_entry_ingress_list =
		qdf_list_first_entry_or_null(&reo_ingress_list->list,
					     struct mgmt_rx_reo_list_entry,
					     node);
	if (!first_entry_ingress_list) {
		mgmt_rx_reo_err("Ingress list is expected to be non empty");
		return QDF_STATUS_E_INVAL;
	}

	ts_ingress_first_entry =
		mgmt_rx_reo_get_global_ts(first_entry_ingress_list->rx_params);

	if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_ingress_first_entry,
						       ts_egress_last_entry))
		return QDF_STATUS_E_INVAL;

	return QDF_STATUS_SUCCESS;
}
3288 
3289 /**
3290  * mgmt_rx_reo_handle_egress_overflow() - Handle overflow of management
3291  * rx reorder egress list
3292  * @reo_egress_list: Pointer to egress reorder list
3293  *
3294  * API to handle overflow of management rx reorder egress list.
3295  *
3296  * Return: QDF_STATUS
3297  */
3298 static QDF_STATUS
mgmt_rx_reo_handle_egress_overflow(struct mgmt_rx_reo_list * reo_egress_list)3299 mgmt_rx_reo_handle_egress_overflow(struct mgmt_rx_reo_list *reo_egress_list)
3300 {
3301 	struct mgmt_rx_reo_list_entry *cur_entry;
3302 	uint32_t egress_list_max_size;
3303 	uint32_t egress_list_cur_size;
3304 	uint32_t num_overflow_frames;
3305 
3306 	if (!reo_egress_list) {
3307 		mgmt_rx_reo_err("Egress reorder list is null");
3308 		return QDF_STATUS_E_NULL_VALUE;
3309 	}
3310 
3311 	reo_egress_list->overflow_count++;
3312 	reo_egress_list->last_overflow_ts = qdf_get_log_timestamp();
3313 	mgmt_rx_reo_debug_rl("Egress overflow, cnt:%llu size:%u",
3314 			     reo_egress_list->overflow_count,
3315 			     qdf_list_size(&reo_egress_list->list));
3316 
3317 	egress_list_cur_size = qdf_list_size(&reo_egress_list->list);
3318 	egress_list_max_size = reo_egress_list->max_list_size;
3319 	num_overflow_frames = egress_list_cur_size - egress_list_max_size;
3320 
3321 	qdf_list_for_each(&reo_egress_list->list, cur_entry, node) {
3322 		if (num_overflow_frames > 0) {
3323 			cur_entry->status |= STATUS_EGRESS_LIST_OVERFLOW;
3324 			num_overflow_frames--;
3325 		}
3326 	}
3327 
3328 	return QDF_STATUS_SUCCESS;
3329 }
3330 
/**
 * mgmt_rx_reo_move_entries_ingress_to_egress_list() - Moves frames in
 * the ingress list which are ready to be delivered to the egress list
 * @ingress_list: Pointer to ingress list
 * @egress_list: Pointer to egress list
 *
 * This API moves frames in the ingress list which are ready to be delivered
 * to the egress list. Ready frames form a contiguous run at the head of the
 * ingress list (the scan stops at the first entry which is not ready); that
 * run is detached in one shot with a list split and appended to the tail of
 * the egress list with a list join. The ingress list lock is held for the
 * whole operation and the egress list lock is acquired inside it, so the
 * lock order is always ingress lock before egress lock.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_move_entries_ingress_to_egress_list
		(struct mgmt_rx_reo_ingress_list *ingress_list,
		 struct mgmt_rx_reo_egress_list *egress_list)
{
	struct mgmt_rx_reo_list *reo_ingress_list;
	struct mgmt_rx_reo_list *reo_egress_list;
	QDF_STATUS status;
	struct mgmt_rx_reo_list_entry *ingress_list_entry;
	struct mgmt_rx_reo_list_entry *latest_frame_ready_to_deliver = NULL;
	uint16_t num_frames_ready_to_deliver = 0;
	uint32_t num_overflow_frames = 0;
	uint32_t ingress_list_max_size;
	uint32_t ingress_list_cur_size;

	if (!ingress_list) {
		mgmt_rx_reo_err("Ingress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_ingress_list = &ingress_list->reo_list;

	if (!egress_list) {
		mgmt_rx_reo_err("Egress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_egress_list = &egress_list->reo_list;

	qdf_spin_lock_bh(&reo_ingress_list->list_lock);

	/*
	 * If the ingress list has overflowed, tag the oldest surplus entries
	 * (one per frame beyond max_list_size) so the overflow is visible in
	 * the per-entry status.
	 */
	ingress_list_cur_size = qdf_list_size(&reo_ingress_list->list);
	ingress_list_max_size = reo_ingress_list->max_list_size;
	if (mgmt_rx_reo_list_overflowed(reo_ingress_list))
		num_overflow_frames =
				ingress_list_cur_size - ingress_list_max_size;

	qdf_list_for_each(&reo_ingress_list->list, ingress_list_entry, node) {
		if (num_overflow_frames > 0) {
			ingress_list_entry->status |=
						STATUS_INGRESS_LIST_OVERFLOW;
			num_overflow_frames--;
		}

		/* Ready frames are contiguous at the head; stop at the
		 * first entry which is not yet deliverable.
		 */
		if (!mgmt_rx_reo_is_entry_ready_to_send_up(ingress_list_entry))
			break;

		/* Stamp removal/insertion times now; the actual move to the
		 * egress list happens below in a single split+join.
		 */
		ingress_list_entry->ingress_list_removal_ts =
							qdf_get_log_timestamp();
		ingress_list_entry->egress_list_insertion_ts =
							qdf_get_log_timestamp();
		latest_frame_ready_to_deliver = ingress_list_entry;
		num_frames_ready_to_deliver++;
	}

	/* Check if ingress list has at least one frame ready to be delivered */
	if (num_frames_ready_to_deliver) {
		qdf_list_t temp_list_frames_to_deliver;

		qdf_list_create(&temp_list_frames_to_deliver,
				INGRESS_TO_EGRESS_MOVEMENT_TEMP_LIST_MAX_SIZE);

		/* Detach the ready run [head .. latest ready entry] from the
		 * ingress list into the temporary list.
		 */
		status = qdf_list_split(&temp_list_frames_to_deliver,
					&reo_ingress_list->list,
					&latest_frame_ready_to_deliver->node);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to split list");
			qdf_list_destroy(&temp_list_frames_to_deliver);
			goto exit_unlock_ingress_list;
		}

		/* The split must have moved exactly the entries counted in
		 * the scan above; anything else indicates list corruption.
		 */
		if (num_frames_ready_to_deliver !=
		    qdf_list_size(&temp_list_frames_to_deliver)) {
			uint32_t list_size;

			list_size = qdf_list_size(&temp_list_frames_to_deliver);
			mgmt_rx_reo_err("Mismatch in frames ready %u and %u",
					num_frames_ready_to_deliver,
					list_size);
			status = QDF_STATUS_E_INVAL;
			qdf_list_destroy(&temp_list_frames_to_deliver);
			goto exit_unlock_ingress_list;
		}

		/* Ingress lock is still held: acquire the egress lock inside
		 * it so the ingress-before-egress lock order is preserved.
		 */
		qdf_spin_lock_bh(&reo_egress_list->list_lock);

		status = qdf_list_join(&reo_egress_list->list,
				       &temp_list_frames_to_deliver);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to join lists");
			qdf_list_destroy(&temp_list_frames_to_deliver);
			goto exit_unlock_egress_and_ingress_list;
		}

		if (mgmt_rx_reo_list_overflowed(reo_egress_list)) {
			status =
			    mgmt_rx_reo_handle_egress_overflow(reo_egress_list);
			if (QDF_IS_STATUS_ERROR(status)) {
				mgmt_rx_reo_err("Failed to handle overflow");
				qdf_list_destroy(&temp_list_frames_to_deliver);
				goto exit_unlock_egress_and_ingress_list;
			}
		}

		status = mgmt_rx_reo_check_sanity_lists(reo_egress_list,
							reo_ingress_list);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Sanity check of reo lists failed");
			qdf_list_destroy(&temp_list_frames_to_deliver);
			goto exit_unlock_egress_and_ingress_list;
		}

		qdf_spin_unlock_bh(&reo_egress_list->list_lock);

		/* The join leaves the temporary list empty; destroy it. */
		qdf_list_destroy(&temp_list_frames_to_deliver);
	}

	status = QDF_STATUS_SUCCESS;
	goto exit_unlock_ingress_list;

exit_unlock_egress_and_ingress_list:
	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
exit_unlock_ingress_list:
	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);

	return status;
}
3467 
3468 /**
3469  * mgmt_rx_reo_ageout_entries_ingress_list() - Helper API to ageout entries
3470  * in the ingress list
3471  * @ingress_list: Pointer to the ingress list
3472  * @latest_aged_out_entry: Double pointer to the latest agedout entry in the
3473  * ingress list
3474  *
3475  * Helper API to ageout entries in the ingress list.
3476  *
3477  * Return: QDF_STATUS
3478  */
3479 static QDF_STATUS
mgmt_rx_reo_ageout_entries_ingress_list(struct mgmt_rx_reo_ingress_list * ingress_list,struct mgmt_rx_reo_list_entry ** latest_aged_out_entry)3480 mgmt_rx_reo_ageout_entries_ingress_list
3481 			(struct mgmt_rx_reo_ingress_list *ingress_list,
3482 			 struct mgmt_rx_reo_list_entry **latest_aged_out_entry)
3483 {
3484 	struct mgmt_rx_reo_list *reo_ingress_list;
3485 	struct mgmt_rx_reo_list_entry *cur_entry;
3486 	uint64_t cur_ts;
3487 
3488 	if (!ingress_list) {
3489 		mgmt_rx_reo_err("Ingress list is null");
3490 		return QDF_STATUS_E_NULL_VALUE;
3491 	}
3492 
3493 	if (!latest_aged_out_entry) {
3494 		mgmt_rx_reo_err("Latest aged out entry is null");
3495 		return QDF_STATUS_E_NULL_VALUE;
3496 	}
3497 
3498 	*latest_aged_out_entry = NULL;
3499 	reo_ingress_list = &ingress_list->reo_list;
3500 
3501 	qdf_spin_lock_bh(&reo_ingress_list->list_lock);
3502 
3503 	cur_ts = qdf_get_log_timestamp();
3504 
3505 	qdf_list_for_each(&reo_ingress_list->list, cur_entry, node) {
3506 		if (cur_ts - cur_entry->ingress_list_insertion_ts >=
3507 		    ingress_list->list_entry_timeout_us) {
3508 			*latest_aged_out_entry = cur_entry;
3509 			cur_entry->status |= STATUS_AGED_OUT;
3510 		}
3511 	}
3512 
3513 	if (!*latest_aged_out_entry)
3514 		goto exit_release_list_lock;
3515 
3516 	qdf_list_for_each(&reo_ingress_list->list, cur_entry, node) {
3517 		if (cur_entry == *latest_aged_out_entry)
3518 			break;
3519 		cur_entry->status |= STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME;
3520 	}
3521 
3522 exit_release_list_lock:
3523 	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);
3524 
3525 	return QDF_STATUS_SUCCESS;
3526 }
3527 
3528 /**
3529  * mgmt_rx_reo_ingress_list_ageout_timer_handler() - Periodic ageout timer
3530  * handler
3531  * @arg: Argument to timer handler
3532  *
3533  * This is the handler for periodic ageout timer used to timeout entries in the
3534  * ingress list.
3535  *
3536  * Return: void
3537  */
3538 static void
mgmt_rx_reo_ingress_list_ageout_timer_handler(void * arg)3539 mgmt_rx_reo_ingress_list_ageout_timer_handler(void *arg)
3540 {
3541 	struct mgmt_rx_reo_ingress_list *ingress_list = arg;
3542 	struct mgmt_rx_reo_egress_list *egress_list;
3543 	QDF_STATUS ret;
3544 	struct mgmt_rx_reo_context *reo_ctx;
3545 	/**
3546 	 * Stores the pointer to the entry in ingress list for the latest aged
3547 	 * out frame. Latest aged out frame is the aged out frame in reorder
3548 	 * list which has the largest global time stamp value.
3549 	 */
3550 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
3551 	struct mgmt_rx_reo_context_info ctx_info = {0};
3552 
3553 	if (!ingress_list) {
3554 		mgmt_rx_reo_err("Ingress list is null");
3555 		return;
3556 	}
3557 
3558 	reo_ctx = mgmt_rx_reo_get_context_from_ingress_list(ingress_list);
3559 	if (!reo_ctx) {
3560 		mgmt_rx_reo_err("Reo context is null");
3561 		return;
3562 	}
3563 	egress_list = &reo_ctx->egress_list;
3564 
3565 	qdf_timer_mod(&ingress_list->ageout_timer,
3566 		      MGMT_RX_REO_INGRESS_LIST_AGEOUT_TIMER_PERIOD_MS);
3567 
3568 	ret = mgmt_rx_reo_ageout_entries_ingress_list(ingress_list,
3569 						      &latest_aged_out_entry);
3570 	if (QDF_IS_STATUS_ERROR(ret)) {
3571 		mgmt_rx_reo_err("Failure to ageout entries in ingress list");
3572 		return;
3573 	}
3574 
3575 	if (!latest_aged_out_entry)
3576 		return;
3577 
3578 	ret = mgmt_rx_reo_move_entries_ingress_to_egress_list(ingress_list,
3579 							      egress_list);
3580 	if (QDF_IS_STATUS_ERROR(ret)) {
3581 		mgmt_rx_reo_err("Ingress to egress list movement failure(%d)",
3582 				ret);
3583 		return;
3584 	}
3585 
3586 	ctx_info.context = MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT;
3587 	ctx_info.context_id = qdf_atomic_inc_return(&reo_ctx->context_id);
3588 	ret = mgmt_rx_reo_release_egress_list_entries(reo_ctx, 0, &ctx_info);
3589 	if (QDF_IS_STATUS_ERROR(ret)) {
3590 		mgmt_rx_reo_err("Failure to release entries, ret = %d", ret);
3591 		return;
3592 	}
3593 }
3594 
3595 /**
3596  * mgmt_rx_reo_egress_inactivity_timer_handler() - Timer handler
3597  * for egress inactivity timer
3598  * @arg: Argument to timer handler
3599  *
3600  * This is the timer handler for tracking management Rx inactivity
3601  * across links.
3602  *
3603  * Return: void
3604  */
3605 static void
mgmt_rx_reo_egress_inactivity_timer_handler(void * arg)3606 mgmt_rx_reo_egress_inactivity_timer_handler(void *arg)
3607 {
3608 	struct mgmt_rx_reo_egress_list *egress_list = arg;
3609 	struct mgmt_rx_reo_list *reo_egress_list;
3610 	struct mgmt_rx_reo_frame_info *last_delivered_frame;
3611 
3612 	if (!egress_list) {
3613 		mgmt_rx_reo_err("Egress list is null");
3614 		return;
3615 	}
3616 
3617 	reo_egress_list = &egress_list->reo_list;
3618 	last_delivered_frame = &reo_egress_list->last_released_frame;
3619 
3620 	qdf_spin_lock(&reo_egress_list->list_lock);
3621 
3622 	qdf_mem_zero(last_delivered_frame, sizeof(*last_delivered_frame));
3623 
3624 	qdf_spin_unlock(&reo_egress_list->list_lock);
3625 }
3626 
3627 /**
3628  * mgmt_rx_reo_prepare_list_entry() - Prepare a list entry from the management
3629  * frame received.
3630  * @frame_desc: Pointer to the frame descriptor
3631  * @entry: Pointer to the list entry
3632  *
3633  * This API prepares the reorder list entry corresponding to a management frame
3634  * to be consumed by host. This entry would be inserted at the appropriate
3635  * position in the reorder list.
3636  *
3637  * Return: QDF_STATUS
3638  */
3639 static QDF_STATUS
mgmt_rx_reo_prepare_list_entry(const struct mgmt_rx_reo_frame_descriptor * frame_desc,struct mgmt_rx_reo_list_entry ** entry)3640 mgmt_rx_reo_prepare_list_entry(
3641 		const struct mgmt_rx_reo_frame_descriptor *frame_desc,
3642 		struct mgmt_rx_reo_list_entry **entry)
3643 {
3644 	struct mgmt_rx_reo_list_entry *list_entry;
3645 	struct wlan_objmgr_pdev *pdev;
3646 	uint8_t link_id;
3647 	uint8_t ml_grp_id;
3648 
3649 	if (!frame_desc) {
3650 		mgmt_rx_reo_err("frame descriptor is null");
3651 		return QDF_STATUS_E_NULL_VALUE;
3652 	}
3653 
3654 	if (!entry) {
3655 		mgmt_rx_reo_err("Pointer to list entry is null");
3656 		return QDF_STATUS_E_NULL_VALUE;
3657 	}
3658 
3659 	link_id = mgmt_rx_reo_get_link_id(frame_desc->rx_params);
3660 	ml_grp_id = mgmt_rx_reo_get_mlo_grp_id(frame_desc->rx_params);
3661 
3662 	pdev = wlan_get_pdev_from_mlo_link_id(link_id, ml_grp_id,
3663 					      WLAN_MGMT_RX_REO_ID);
3664 	if (!pdev) {
3665 		mgmt_rx_reo_err("pdev corresponding to link %u is null",
3666 				link_id);
3667 		return QDF_STATUS_E_NULL_VALUE;
3668 	}
3669 
3670 	list_entry =  qdf_mem_malloc(sizeof(*list_entry));
3671 	if (!list_entry) {
3672 		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
3673 		mgmt_rx_reo_err("List entry allocation failed");
3674 		return QDF_STATUS_E_NOMEM;
3675 	}
3676 
3677 	list_entry->pdev = pdev;
3678 	list_entry->nbuf = frame_desc->nbuf;
3679 	list_entry->rx_params = frame_desc->rx_params;
3680 	list_entry->wait_count = frame_desc->wait_count;
3681 	list_entry->initial_wait_count = frame_desc->wait_count;
3682 	qdf_mem_copy(list_entry->shared_snapshots, frame_desc->shared_snapshots,
3683 		     qdf_min(sizeof(list_entry->shared_snapshots),
3684 			     sizeof(frame_desc->shared_snapshots)));
3685 	qdf_mem_copy(list_entry->host_snapshot, frame_desc->host_snapshot,
3686 		     qdf_min(sizeof(list_entry->host_snapshot),
3687 			     sizeof(frame_desc->host_snapshot)));
3688 	list_entry->status = 0;
3689 	if (list_entry->wait_count.total_count)
3690 		list_entry->status |= STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
3691 	qdf_atomic_init(&list_entry->scheduled_count);
3692 
3693 	*entry = list_entry;
3694 
3695 	return QDF_STATUS_SUCCESS;
3696 }
3697 
3698 /**
3699  * mgmt_rx_reo_update_wait_count() - Update the wait count for a frame based
3700  * on the wait count of a frame received after that on air.
3701  * @wait_count_old_frame: Pointer to the wait count structure for the old frame.
3702  * @wait_count_new_frame: Pointer to the wait count structure for the new frame.
3703  *
3704  * This API optimizes the wait count of a frame based on the wait count of
3705  * a frame received after that on air. Old frame refers to the frame received
3706  * first on the air and new frame refers to the frame received after that.
3707  * We use the following fundamental idea. Wait counts for old frames can't be
3708  * more than wait counts for the new frame. Use this to optimize the wait count
3709  * for the old frames. Per link wait count of an old frame is minimum of the
3710  * per link wait count of the old frame and new frame.
3711  *
3712  * Return: QDF_STATUS
3713  */
3714 static QDF_STATUS
mgmt_rx_reo_update_wait_count(struct mgmt_rx_reo_wait_count * wait_count_old_frame,const struct mgmt_rx_reo_wait_count * wait_count_new_frame)3715 mgmt_rx_reo_update_wait_count(
3716 		struct mgmt_rx_reo_wait_count *wait_count_old_frame,
3717 		const struct mgmt_rx_reo_wait_count *wait_count_new_frame)
3718 {
3719 	uint8_t link_id;
3720 
3721 	if (!wait_count_old_frame) {
3722 		mgmt_rx_reo_err("Pointer to old frame wait count is null");
3723 		return QDF_STATUS_E_NULL_VALUE;
3724 	}
3725 
3726 	if (!wait_count_new_frame) {
3727 		mgmt_rx_reo_err("Pointer to new frame wait count is null");
3728 		return QDF_STATUS_E_NULL_VALUE;
3729 	}
3730 
3731 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
3732 		if (wait_count_old_frame->per_link_count[link_id]) {
3733 			uint32_t temp_wait_count;
3734 			uint32_t wait_count_diff;
3735 
3736 			temp_wait_count =
3737 				wait_count_old_frame->per_link_count[link_id];
3738 			wait_count_old_frame->per_link_count[link_id] =
3739 				qdf_min(wait_count_old_frame->
3740 					per_link_count[link_id],
3741 					wait_count_new_frame->
3742 					per_link_count[link_id]);
3743 			wait_count_diff = temp_wait_count -
3744 				wait_count_old_frame->per_link_count[link_id];
3745 
3746 			wait_count_old_frame->total_count -= wait_count_diff;
3747 		}
3748 	}
3749 
3750 	return QDF_STATUS_SUCCESS;
3751 }
3752 
/**
 * mgmt_rx_reo_update_ingress_list() - Modify the reorder list when a frame is
 * received
 * @ingress_list: Pointer to ingress list
 * @frame_desc: Pointer to frame descriptor
 * @new: pointer to the list entry for the current frame; must be non-NULL
 * exactly when the frame is a host-consumed frame requiring reorder
 * @is_queued: Whether this frame is queued in the REO list
 *
 * API to update the reorder list on every management frame reception.
 * This API does the following things.
 *   a) Update the wait counts for all the frames in the reorder list with
 *      global time stamp <= current frame's global time stamp. We use the
 *      following principle for updating the wait count in this case.
 *      Let A and B be two management frames with global time stamp of A <=
 *      global time stamp of B. Let WAi and WBi be the wait count of A and B
 *      for link i, then WAi <= WBi. Hence we can optimize WAi as
 *      min(WAi, WBi).
 *   b) If the current frame is to be consumed by host, insert it in the
 *      reorder list such that the list is always sorted in the increasing order
 *      of global time stamp. Update the wait count of the current frame based
 *      on the frame next to it in the reorder list (if any).
 *   c) Update the wait count of the frames in the reorder list with global
 *      time stamp > current frame's global time stamp. Let the current frame
 *      belong to link "l". Then link "l"'s wait count can be reduced by one for
 *      all the frames in the reorder list with global time stamp > current
 *      frame's global time stamp.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_ingress_list(struct mgmt_rx_reo_ingress_list *ingress_list,
				struct mgmt_rx_reo_frame_descriptor *frame_desc,
				struct mgmt_rx_reo_list_entry *new,
				bool *is_queued)
{
	struct mgmt_rx_reo_list *reo_ingress_list;
	struct mgmt_rx_reo_list_entry *cur;
	/* First list entry with global time stamp greater than the current
	 * frame's; the current frame is inserted just before it.
	 */
	struct mgmt_rx_reo_list_entry *least_greater = NULL;
	bool least_greater_entry_found = false;
	QDF_STATUS status;
	uint16_t list_insertion_pos = 0;
	uint32_t ts_new;

	if (!ingress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo ingress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_ingress_list = &ingress_list->reo_list;

	if (!frame_desc) {
		mgmt_rx_reo_err("Mgmt frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* "new" must be non-NULL exactly when this is a host consumed frame
	 * which requires reordering; any other combination is a caller bug.
	 */
	if (!(frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
	      frame_desc->reo_required) != !new) {
		mgmt_rx_reo_err("Invalid input");
		return QDF_STATUS_E_INVAL;
	}

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	*is_queued = false;

	/**
	 * In some cases, the current frame and its associated
	 * rx_params/reo_params may get freed immediately after the frame
	 * is queued to egress list. Hence fetching the global time stamp from
	 * "frame_desc->rx_params->reo_params" could lead to use after free.
	 * Store a copy of "reo_params" in the frame descriptor and access
	 * the copy after the frame is queued to egress list.
	 *
	 * TODO:- Fix this cleanly using refcount mechanism or structure
	 * duplication.
	 */
	ts_new = frame_desc->reo_params_copy.global_timestamp;

	frame_desc->ingress_list_size_rx =
				qdf_list_size(&reo_ingress_list->list);

	/* Step a): walk entries with time stamp <= ts_new and tighten their
	 * wait counts using the current frame's wait count.
	 */
	qdf_list_for_each(&reo_ingress_list->list, cur, node) {
		uint32_t ts_cur;

		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);

		least_greater_entry_found =
		     !mgmt_rx_reo_compare_global_timestamps_gte(ts_new, ts_cur);
		if (least_greater_entry_found) {
			least_greater = cur;
			break;
		}

		/* A stale frame may only be older than parallel-rx entries */
		qdf_assert_always(!frame_desc->is_stale || cur->is_parallel_rx);

		list_insertion_pos++;

		status = mgmt_rx_reo_update_wait_count(&cur->wait_count,
						       &frame_desc->wait_count);
		if (QDF_IS_STATUS_ERROR(status))
			return status;

		if (cur->wait_count.total_count == 0)
			cur->status &= ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
	}

	/* Step b): insert the current frame into the ingress list (sorted by
	 * global time stamp), unless it is stale or already queued to the
	 * egress list.
	 */
	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
	    !frame_desc->is_stale && frame_desc->reo_required &&
	    (frame_desc->queued_list != MGMT_RX_REO_LIST_TYPE_EGRESS)) {
		bool overflow;

		if (least_greater_entry_found) {
			/* The next-newer entry bounds this frame's wait count */
			status = mgmt_rx_reo_update_wait_count(
					&new->wait_count,
					&least_greater->wait_count);

			if (QDF_IS_STATUS_ERROR(status))
				return status;

			frame_desc->wait_count = new->wait_count;

			if (new->wait_count.total_count == 0)
				new->status &=
					~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
		}

		new->ingress_list_insertion_ts = qdf_get_log_timestamp();
		new->ingress_timestamp = frame_desc->ingress_timestamp;
		new->is_parallel_rx = frame_desc->is_parallel_rx;
		frame_desc->ingress_list_insertion_pos = list_insertion_pos;

		if (least_greater_entry_found)
			status = qdf_list_insert_before(
					&reo_ingress_list->list, &new->node,
					&least_greater->node);
		else
			status = qdf_list_insert_back(
					&reo_ingress_list->list, &new->node);

		if (QDF_IS_STATUS_ERROR(status))
			return status;

		*is_queued = true;
		frame_desc->queued_list = MGMT_RX_REO_LIST_TYPE_INGRESS;

		/* Record (rate-limited) if insertion pushed the list beyond
		 * its configured maximum size.
		 */
		overflow = (qdf_list_size(&reo_ingress_list->list) >
					  reo_ingress_list->max_list_size);
		if (overflow) {
			qdf_list_t *ingress_list_ptr = &reo_ingress_list->list;

			reo_ingress_list->overflow_count++;
			reo_ingress_list->last_overflow_ts =
							qdf_get_log_timestamp();
			mgmt_rx_reo_debug_rl("Ingress ovrflw, cnt:%llu size:%u",
					     reo_ingress_list->overflow_count,
					     qdf_list_size(ingress_list_ptr));
		}

		if (new->wait_count.total_count == 0)
			frame_desc->zero_wait_count_rx = true;

		/* Deliverable immediately if it waits on nothing and sits at
		 * the head of the ingress list.
		 */
		if (frame_desc->zero_wait_count_rx &&
		    qdf_list_first_entry_or_null(&reo_ingress_list->list,
						 struct mgmt_rx_reo_list_entry,
						 node) == new)
			frame_desc->immediate_delivery = true;
	}

	/* Step c): decrement the current frame's link wait count for every
	 * entry newer than it (time stamp > ts_new).
	 */
	if (least_greater_entry_found) {
		cur = least_greater;

		qdf_list_for_each_from(&reo_ingress_list->list, cur, node) {
			uint8_t frame_link_id;
			struct mgmt_rx_reo_wait_count *wait_count;

			/**
			 * In some cases, the current frame and its associated
			 * rx_params/reo_params may get freed immediately after
			 * the frame is queued to egress list. Hence fetching
			 * the link ID from
			 * "frame_desc->rx_params->reo_params" could lead to
			 * use after free. Store a copy of "reo_params" in the
			 * frame descriptor and access the copy after the frame
			 * is queued to egress list.
			 *
			 * TODO:- Fix this cleanly using refcount mechanism or
			 * structure duplication.
			 */
			frame_link_id = frame_desc->reo_params_copy.link_id;
			wait_count = &cur->wait_count;
			if (wait_count->per_link_count[frame_link_id]) {
				uint32_t old_wait_count;
				uint32_t new_wait_count;
				uint32_t wait_count_diff;
				uint16_t pkt_ctr_delta;

				pkt_ctr_delta = frame_desc->pkt_ctr_delta;
				old_wait_count =
				      wait_count->per_link_count[frame_link_id];

				/* Clamp at zero; the delta may exceed the
				 * remaining wait count for this link.
				 */
				if (old_wait_count >= pkt_ctr_delta)
					new_wait_count = old_wait_count -
							 pkt_ctr_delta;
				else
					new_wait_count = 0;

				wait_count_diff = old_wait_count -
						  new_wait_count;

				wait_count->per_link_count[frame_link_id] =
								new_wait_count;
				wait_count->total_count -= wait_count_diff;

				if (wait_count->total_count == 0)
					cur->status &=
					  ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
}
3976 
3977 static QDF_STATUS
mgmt_rx_reo_update_egress_list(struct mgmt_rx_reo_egress_list * egress_list,struct mgmt_rx_reo_frame_descriptor * frame_desc,struct mgmt_rx_reo_list_entry * new,bool * is_queued)3978 mgmt_rx_reo_update_egress_list(struct mgmt_rx_reo_egress_list *egress_list,
3979 			       struct mgmt_rx_reo_frame_descriptor *frame_desc,
3980 			       struct mgmt_rx_reo_list_entry *new,
3981 			       bool *is_queued)
3982 {
3983 	struct mgmt_rx_reo_list *reo_egress_list;
3984 	struct mgmt_rx_reo_list_entry *cur;
3985 	struct mgmt_rx_reo_list_entry *last;
3986 	struct mgmt_rx_reo_list_entry *least_greater = NULL;
3987 	bool least_greater_entry_found = false;
3988 	uint32_t ts_last;
3989 	uint32_t ts_new;
3990 	uint16_t list_insertion_pos = 0;
3991 	QDF_STATUS ret;
3992 
3993 	if (!egress_list) {
3994 		mgmt_rx_reo_err("Mgmt Rx reo egress list is null");
3995 		return QDF_STATUS_E_NULL_VALUE;
3996 	}
3997 	reo_egress_list = &egress_list->reo_list;
3998 
3999 	if (!frame_desc) {
4000 		mgmt_rx_reo_err("Mgmt frame descriptor is null");
4001 		return QDF_STATUS_E_NULL_VALUE;
4002 	}
4003 
4004 	if (!(frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
4005 	      frame_desc->reo_required) != !new) {
4006 		mgmt_rx_reo_err("Invalid input");
4007 		return QDF_STATUS_E_INVAL;
4008 	}
4009 
4010 	if (!is_queued) {
4011 		mgmt_rx_reo_err("Pointer to queued indication is null");
4012 		return QDF_STATUS_E_NULL_VALUE;
4013 	}
4014 	*is_queued = false;
4015 
4016 	ts_new = mgmt_rx_reo_get_global_ts(frame_desc->rx_params);
4017 	frame_desc->egress_list_size_rx = qdf_list_size(&reo_egress_list->list);
4018 
4019 	ret = mgmt_rx_reo_is_stale_frame(&reo_egress_list->last_released_frame,
4020 					 frame_desc);
4021 	if (QDF_IS_STATUS_ERROR(ret))
4022 		return ret;
4023 
4024 	if (frame_desc->is_stale) {
4025 		ret = mgmt_rx_reo_handle_stale_frame(reo_egress_list,
4026 						     frame_desc);
4027 		if (QDF_IS_STATUS_ERROR(ret))
4028 			return ret;
4029 
4030 		qdf_list_for_each(&reo_egress_list->list, cur, node) {
4031 			uint32_t ts_cur;
4032 
4033 			ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);
4034 
4035 			if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_new,
4036 								       ts_cur))
4037 				break;
4038 
4039 			qdf_assert_always(cur->is_parallel_rx);
4040 		}
4041 
4042 		return QDF_STATUS_SUCCESS;
4043 	}
4044 
4045 	if (!new)
4046 		return QDF_STATUS_SUCCESS;
4047 
4048 	if (qdf_list_empty(&reo_egress_list->list))
4049 		return QDF_STATUS_SUCCESS;
4050 
4051 	last = qdf_list_last_entry(&reo_egress_list->list,
4052 				   struct mgmt_rx_reo_list_entry, node);
4053 
4054 	ts_last = mgmt_rx_reo_get_global_ts(last->rx_params);
4055 
4056 	if (mgmt_rx_reo_compare_global_timestamps_gte(ts_new, ts_last))
4057 		return QDF_STATUS_SUCCESS;
4058 
4059 	qdf_list_for_each(&reo_egress_list->list, cur, node) {
4060 		uint32_t ts_cur;
4061 
4062 		ts_cur = mgmt_rx_reo_get_global_ts(cur->rx_params);
4063 
4064 		if (!mgmt_rx_reo_compare_global_timestamps_gte(ts_new,
4065 							       ts_cur)) {
4066 			least_greater = cur;
4067 			least_greater_entry_found = true;
4068 			break;
4069 		}
4070 
4071 		list_insertion_pos++;
4072 	}
4073 
4074 	if (!least_greater_entry_found) {
4075 		mgmt_rx_reo_err("Lest greater entry not found");
4076 		return QDF_STATUS_E_FAILURE;
4077 	}
4078 
4079 	ret = mgmt_rx_reo_update_wait_count(&new->wait_count,
4080 					    &least_greater->wait_count);
4081 
4082 	if (QDF_IS_STATUS_ERROR(ret))
4083 		return ret;
4084 
4085 	frame_desc->wait_count = new->wait_count;
4086 
4087 	if (new->wait_count.total_count == 0)
4088 		new->status &= ~STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS;
4089 
4090 	new->egress_list_insertion_ts = qdf_get_log_timestamp();
4091 	new->ingress_timestamp = frame_desc->ingress_timestamp;
4092 	new->is_parallel_rx = frame_desc->is_parallel_rx;
4093 	new->status |= STATUS_OLDER_THAN_READY_TO_DELIVER_FRAMES;
4094 	frame_desc->egress_list_insertion_pos = list_insertion_pos;
4095 
4096 	ret = qdf_list_insert_before(&reo_egress_list->list, &new->node,
4097 				     &least_greater->node);
4098 	if (QDF_IS_STATUS_ERROR(ret))
4099 		return ret;
4100 
4101 	if (mgmt_rx_reo_list_overflowed(reo_egress_list)) {
4102 		ret = mgmt_rx_reo_handle_egress_overflow(reo_egress_list);
4103 		if (QDF_IS_STATUS_ERROR(ret)) {
4104 			mgmt_rx_reo_err("Failed to handle egress overflow");
4105 		}
4106 	}
4107 
4108 	*is_queued = true;
4109 	frame_desc->queued_list = MGMT_RX_REO_LIST_TYPE_EGRESS;
4110 
4111 	if (frame_desc->wait_count.total_count == 0)
4112 		frame_desc->zero_wait_count_rx = true;
4113 	frame_desc->immediate_delivery = true;
4114 
4115 	return QDF_STATUS_SUCCESS;
4116 }
4117 
/**
 * mgmt_rx_reo_update_lists() - Update ingress and egress lists on frame
 * reception
 * @ingress_list: Pointer to ingress list
 * @egress_list: Pointer to egress list
 * @frame_desc: Pointer to frame descriptor
 * @is_queued: Set to true if the frame got queued to either list
 *
 * Prepares a reorder list entry for host-consumed frames which require
 * reordering, then — with the ingress list lock held, and the egress list
 * lock nested inside it — updates the egress list followed by the ingress
 * list, running a sanity check on each after its update. A frame may be
 * queued to at most one of the two lists; if it ends up queued to neither,
 * the prepared entry is freed and its pdev reference released.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_update_lists(struct mgmt_rx_reo_ingress_list *ingress_list,
			 struct mgmt_rx_reo_egress_list *egress_list,
			 struct mgmt_rx_reo_frame_descriptor *frame_desc,
			 bool *is_queued)
{
	struct mgmt_rx_reo_list *reo_ingress_list;
	struct mgmt_rx_reo_list *reo_egress_list;
	bool is_queued_to_ingress_list = false;
	bool is_queued_to_egress_list = false;
	QDF_STATUS status;
	struct mgmt_rx_reo_list_entry *new_entry = NULL;
	enum mgmt_rx_reo_list_type queued_list;

	if (!ingress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo ingress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_ingress_list = &ingress_list->reo_list;

	if (!egress_list) {
		mgmt_rx_reo_err("Mgmt Rx reo egress list is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	reo_egress_list = &egress_list->reo_list;

	if (!frame_desc) {
		mgmt_rx_reo_err("Mgmt frame descriptor is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}
	*is_queued = false;

	/* Prepare the list entry before acquiring lock */
	if (frame_desc->type == MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME &&
	    frame_desc->reo_required) {
		status = mgmt_rx_reo_prepare_list_entry(frame_desc, &new_entry);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to prepare list entry");
			return QDF_STATUS_E_FAILURE;
		}
	}

	/* Lock order: ingress lock first, egress lock nested inside it */
	qdf_spin_lock_bh(&reo_ingress_list->list_lock);

	qdf_spin_lock_bh(&reo_egress_list->list_lock);

	status = mgmt_rx_reo_update_egress_list(egress_list, frame_desc,
						new_entry,
						&is_queued_to_egress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Egress list update failed");
		goto exit_release_egress_list_lock;
	}

	status = mgmt_rx_reo_check_sanity_list(reo_egress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of egress list failed");
		goto exit_release_egress_list_lock;
	}

	qdf_spin_unlock_bh(&reo_egress_list->list_lock);

	/* Ingress update runs under the ingress lock only */
	status = mgmt_rx_reo_update_ingress_list(ingress_list, frame_desc,
						 new_entry,
						 &is_queued_to_ingress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Ingress list update failed");
		goto exit_release_ingress_list_lock;
	}

	status = mgmt_rx_reo_check_sanity_list(reo_ingress_list);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Sanity check of ingress list failed");
		goto exit_release_ingress_list_lock;
	}

	status = QDF_STATUS_SUCCESS;
	goto exit_release_ingress_list_lock;

exit_release_egress_list_lock:
	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
exit_release_ingress_list_lock:
	qdf_spin_unlock_bh(&reo_ingress_list->list_lock);

	/* Invariant checks: a frame must land in at most one list, and only
	 * a prepared entry may be queued.
	 */
	if (is_queued_to_ingress_list && is_queued_to_egress_list)
		mgmt_rx_reo_err("Frame is queued to ingress and egress lists");

	*is_queued = is_queued_to_ingress_list || is_queued_to_egress_list;

	queued_list = frame_desc->queued_list;
	if (*is_queued && queued_list == MGMT_RX_REO_LIST_TYPE_INVALID)
		mgmt_rx_reo_err("Invalid queued list type %d", queued_list);

	if (!new_entry && *is_queued)
		mgmt_rx_reo_err("Queued an invalid frame");

	/* Cleanup the entry if it is not queued */
	if (new_entry && !*is_queued) {
		/**
		 * New entry created is not inserted to reorder list, free
		 * the entry and release the reference
		 */
		wlan_objmgr_pdev_release_ref(new_entry->pdev,
					     WLAN_MGMT_RX_REO_ID);
		qdf_mem_free(new_entry);
	}

	return status;
}
4232 
4233 /**
4234  * mgmt_rx_reo_ingress_list_init() - Initialize the management rx-reorder
4235  * ingress list
4236  * @ingress_list: Pointer to ingress list
4237  *
4238  * API to initialize the management rx-reorder ingress list.
4239  *
4240  * Return: QDF_STATUS
4241  */
4242 static QDF_STATUS
mgmt_rx_reo_ingress_list_init(struct mgmt_rx_reo_ingress_list * ingress_list)4243 mgmt_rx_reo_ingress_list_init(struct mgmt_rx_reo_ingress_list *ingress_list)
4244 {
4245 	QDF_STATUS status;
4246 	struct mgmt_rx_reo_list *reo_ingress_list;
4247 
4248 	if (!ingress_list) {
4249 		mgmt_rx_reo_err("Ingress list is null");
4250 		return QDF_STATUS_E_NULL_VALUE;
4251 	}
4252 
4253 	reo_ingress_list = &ingress_list->reo_list;
4254 
4255 	reo_ingress_list->max_list_size = MGMT_RX_REO_INGRESS_LIST_MAX_SIZE;
4256 	qdf_list_create(&reo_ingress_list->list,
4257 			reo_ingress_list->max_list_size);
4258 	qdf_spinlock_create(&reo_ingress_list->list_lock);
4259 	qdf_mem_zero(&reo_ingress_list->last_inserted_frame,
4260 		     sizeof(reo_ingress_list->last_inserted_frame));
4261 	qdf_mem_zero(&reo_ingress_list->last_released_frame,
4262 		     sizeof(reo_ingress_list->last_released_frame));
4263 
4264 	ingress_list->list_entry_timeout_us =
4265 					MGMT_RX_REO_INGRESS_LIST_TIMEOUT_US;
4266 
4267 	status = qdf_timer_init(NULL, &ingress_list->ageout_timer,
4268 				mgmt_rx_reo_ingress_list_ageout_timer_handler,
4269 				ingress_list, QDF_TIMER_TYPE_WAKE_APPS);
4270 	if (QDF_IS_STATUS_ERROR(status)) {
4271 		mgmt_rx_reo_err("Failed to initialize ingress ageout timer");
4272 		return status;
4273 	}
4274 	qdf_timer_start(&ingress_list->ageout_timer,
4275 			MGMT_RX_REO_INGRESS_LIST_AGEOUT_TIMER_PERIOD_MS);
4276 
4277 	return QDF_STATUS_SUCCESS;
4278 }
4279 
4280 /**
4281  * mgmt_rx_reo_egress_list_init() - Initialize the management rx-reorder
4282  * egress list
4283  * @egress_list: Pointer to egress list
4284  *
4285  * API to initialize the management rx-reorder egress list.
4286  *
4287  * Return: QDF_STATUS
4288  */
4289 static QDF_STATUS
mgmt_rx_reo_egress_list_init(struct mgmt_rx_reo_egress_list * egress_list)4290 mgmt_rx_reo_egress_list_init(struct mgmt_rx_reo_egress_list *egress_list)
4291 {
4292 	struct mgmt_rx_reo_list *reo_egress_list;
4293 	QDF_STATUS status;
4294 
4295 	if (!egress_list) {
4296 		mgmt_rx_reo_err("Egress list is null");
4297 		return QDF_STATUS_E_NULL_VALUE;
4298 	}
4299 
4300 	reo_egress_list = &egress_list->reo_list;
4301 
4302 	reo_egress_list->max_list_size = MGMT_RX_REO_EGRESS_LIST_MAX_SIZE;
4303 	qdf_list_create(&reo_egress_list->list, reo_egress_list->max_list_size);
4304 	qdf_spinlock_create(&reo_egress_list->list_lock);
4305 	qdf_mem_zero(&reo_egress_list->last_inserted_frame,
4306 		     sizeof(reo_egress_list->last_inserted_frame));
4307 	qdf_mem_zero(&reo_egress_list->last_released_frame,
4308 		     sizeof(reo_egress_list->last_released_frame));
4309 
4310 	status = qdf_timer_init(NULL, &egress_list->egress_inactivity_timer,
4311 				mgmt_rx_reo_egress_inactivity_timer_handler,
4312 				egress_list, QDF_TIMER_TYPE_WAKE_APPS);
4313 	if (QDF_IS_STATUS_ERROR(status)) {
4314 		mgmt_rx_reo_err("Failed to initialize egress inactivity timer");
4315 		return status;
4316 	}
4317 
4318 	return QDF_STATUS_SUCCESS;
4319 }
4320 
4321 /**
4322  * check_frame_sanity() - Check the sanity of a given management frame
4323  * @pdev: Pointer to pdev object
4324  * @desc: Pointer to frame descriptor
4325  *
4326  * API to check the sanity of a given management frame. This API checks for the
4327  * following errors.
4328  *
4329  *     1. Invalid management rx reo parameters
4330  *     2. Host consumed management frames with zero duration
4331  *
4332  * Return: QDF_STATUS
4333  */
4334 static QDF_STATUS
check_frame_sanity(struct wlan_objmgr_pdev * pdev,struct mgmt_rx_reo_frame_descriptor * desc)4335 check_frame_sanity(struct wlan_objmgr_pdev *pdev,
4336 		   struct mgmt_rx_reo_frame_descriptor *desc)
4337 {
4338 	QDF_STATUS status;
4339 
4340 	if (!desc) {
4341 		mgmt_rx_reo_err("Frame descriptor is null");
4342 		return QDF_STATUS_E_NULL_VALUE;
4343 	}
4344 
4345 	status = check_and_handle_invalid_reo_params(desc);
4346 	if (QDF_IS_STATUS_ERROR(status)) {
4347 		mgmt_rx_reo_warn_rl("Drop frame with invalid reo params");
4348 		return status;
4349 	}
4350 
4351 	status = check_and_handle_zero_frame_duration(pdev, desc);
4352 	if (QDF_IS_STATUS_ERROR(status)) {
4353 		mgmt_rx_reo_warn_rl("Drop frame with zero duration");
4354 		return status;
4355 	}
4356 
4357 	return QDF_STATUS_SUCCESS;
4358 }
4359 
4360 /**
4361  * wlan_mgmt_rx_reo_update_host_snapshot() - Update Host snapshot with the MGMT
4362  * Rx REO parameters.
4363  * @pdev: pdev extracted from the WMI event
4364  * @desc: pointer to frame descriptor
4365  *
4366  * Return: QDF_STATUS of operation
4367  */
static QDF_STATUS
wlan_mgmt_rx_reo_update_host_snapshot(struct wlan_objmgr_pdev *pdev,
				      struct mgmt_rx_reo_frame_descriptor *desc)
{
	struct mgmt_rx_reo_pdev_info *rx_reo_pdev_ctx;
	struct mgmt_rx_reo_snapshot_params *host_ss;
	struct mgmt_rx_reo_params *reo_params;
	int pkt_ctr_delta;
	struct wlan_objmgr_psoc *psoc;
	uint16_t pkt_ctr_delta_thresh;

	if (!desc) {
		mgmt_rx_reo_err("Mgmt Rx REO frame descriptor null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	if (!desc->rx_params) {
		mgmt_rx_reo_err("Mgmt Rx params null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_params = desc->rx_params->reo_params;
	if (!reo_params) {
		mgmt_rx_reo_err("Mgmt Rx REO params NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
	if (!rx_reo_pdev_ctx) {
		mgmt_rx_reo_err("Mgmt Rx REO context empty for pdev %pK", pdev);
		return QDF_STATUS_E_FAILURE;
	}

	psoc = wlan_pdev_get_psoc(pdev);

	/* FW should send valid REO parameters */
	if (!reo_params->valid) {
		mgmt_rx_reo_err("Mgmt Rx REO params is invalid");
		return QDF_STATUS_E_FAILURE;
	}

	host_ss = &rx_reo_pdev_ctx->host_snapshot;

	/*
	 * First frame after the snapshot was invalidated: there is no
	 * previous counter to diff against, so treat the delta as 1
	 * (i.e. no hole) and just record this frame's parameters.
	 */
	if (!host_ss->valid) {
		desc->pkt_ctr_delta = 1;
		goto update_host_ss;
	}

	/*
	 * Packet counter moved backwards (or repeated) relative to the
	 * last recorded frame on this link. Hand it off to the
	 * out-of-order handler; on success the frame is flagged for drop
	 * and processing continues, on failure trigger self recovery.
	 */
	if (mgmt_rx_reo_compare_pkt_ctrs_gte(host_ss->mgmt_pkt_ctr,
					     reo_params->mgmt_pkt_ctr)) {
		QDF_STATUS status;

		status = handle_out_of_order_pkt_ctr(desc, host_ss);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err_rl("Failed to handle out of order pkt");
			goto failure_debug;
		}

		mgmt_rx_reo_warn_rl("Drop frame with out of order pkt ctr");
	}

	pkt_ctr_delta = mgmt_rx_reo_subtract_pkt_ctrs(reo_params->mgmt_pkt_ctr,
						      host_ss->mgmt_pkt_ctr);
	desc->pkt_ctr_delta = pkt_ctr_delta;

	/* Delta of exactly 1 is the normal in-order case; nothing to log */
	if (pkt_ctr_delta == 1)
		goto update_host_ss;

	/*
	 * Under back pressure scenarios, FW may drop management Rx frame
	 * WMI events. So holes in the management packet counter is expected.
	 * Add a debug print and optional assert to track the holes.
	 */
	mgmt_rx_reo_debug("pkt_ctr_delta = %d, link = %u", pkt_ctr_delta,
			  reo_params->link_id);
	mgmt_rx_reo_debug("Cur frame valid = %u, pkt_ctr = %u, ts = %u",
			  reo_params->valid, reo_params->mgmt_pkt_ctr,
			  reo_params->global_timestamp);
	mgmt_rx_reo_debug("Last frame valid = %u, pkt_ctr = %u, ts = %u",
			  host_ss->valid, host_ss->mgmt_pkt_ctr,
			  host_ss->global_timestamp);

	/*
	 * A hole wider than the configured threshold (0 disables the
	 * check) is considered fatal and triggers self recovery.
	 */
	pkt_ctr_delta_thresh = wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh(psoc);

	if (pkt_ctr_delta_thresh && pkt_ctr_delta > pkt_ctr_delta_thresh) {
		mgmt_rx_reo_err("pkt ctr delta %u > thresh %u for link %u",
				pkt_ctr_delta, pkt_ctr_delta_thresh,
				reo_params->link_id);
		goto failure_debug;
	}

update_host_ss:
	/* Record this frame as the newest one seen on this pdev/link */
	host_ss->valid = true;
	host_ss->global_timestamp = reo_params->global_timestamp;
	host_ss->mgmt_pkt_ctr = reo_params->mgmt_pkt_ctr;

	return QDF_STATUS_SUCCESS;

failure_debug:
	/* Dump both snapshots before asserting/recovering for triage */
	mgmt_rx_reo_err("Cur Pkt valid = %u, pkt_ctr = %u, ts = %u, link = %u",
			reo_params->valid, reo_params->mgmt_pkt_ctr,
			reo_params->global_timestamp, reo_params->link_id);
	mgmt_rx_reo_err("Last Pkt valid = %u, pkt_ctr = %u, ts = %u",
			host_ss->valid, host_ss->mgmt_pkt_ctr,
			host_ss->global_timestamp);
	mgmt_rx_reo_err("Triggering self recovery, out of order pkt");
	qdf_trigger_self_recovery(psoc, QDF_MGMT_RX_REO_OUT_OF_ORDER_PKT);

	return QDF_STATUS_E_FAILURE;
}
4478 
4479 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
4480 /**
4481  * mgmt_rx_reo_ingress_frame_debug_info_enabled() - API to check whether ingress
4482  * frame info debug feaure is enabled
4483  * @ingress_frame_debug_info: Pointer to ingress frame debug info object
4484  *
4485  * Return: true or false
4486  */
4487 static bool
mgmt_rx_reo_ingress_frame_debug_info_enabled(struct reo_ingress_debug_info * ingress_frame_debug_info)4488 mgmt_rx_reo_ingress_frame_debug_info_enabled
4489 		(struct reo_ingress_debug_info *ingress_frame_debug_info)
4490 {
4491 	return ingress_frame_debug_info->frame_list_size;
4492 }
4493 
4494 /**
4495  * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
4496  * related to frames going into the reorder module
4497  * @reo_ctx: Pointer to reorder context
4498  *
4499  * API to print the stats related to frames going into the management
4500  * Rx reorder module.
4501  *
4502  * Return: QDF_STATUS
4503  */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	struct reo_ingress_frame_stats *stats;
	uint8_t link_id;
	uint8_t desc_type;
	uint8_t reason;
	uint8_t list_type;
	/* Per-link / per-descriptor-type totals derived from @stats below */
	uint64_t ingress_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t ingress_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_ingress_count = 0;
	uint64_t reo_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t reo_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_reo_count = 0;
	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_stale_count = 0;
	uint64_t parallel_rx_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t parallel_rx_per_desc[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_parallel_rx_count = 0;
	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
	uint64_t total_error_count = 0;
	uint64_t drop_count_per_link[MAX_MLO_LINKS] = {0};
	uint64_t drop_count_per_reason[MGMT_RX_REO_INGRESS_DROP_REASON_MAX] = {0};
	uint64_t total_drop_count = 0;
	uint64_t total_missing_count = 0;
	uint64_t total_queued = 0;
	uint64_t queued_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
	uint64_t queued_per_link[MAX_MLO_LINKS] = {0};
	uint64_t total_zero_wait_count_rx = 0;
	uint64_t zero_wait_count_rx_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
	uint64_t zero_wait_count_rx_per_link[MAX_MLO_LINKS] = {0};
	uint64_t total_immediate_delivery = 0;
	uint64_t immediate_delivery_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
	uint64_t immediate_delivery_per_link[MAX_MLO_LINKS] = {0};

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	stats = &reo_ctx->ingress_frame_debug_info.stats;

	/* Fold the [link][desc_type] counters into per-link sums */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
		     desc_type++) {
			ingress_count_per_link[link_id] +=
				stats->ingress_count[link_id][desc_type];
			reo_count_per_link[link_id] +=
				stats->reo_count[link_id][desc_type];
			stale_count_per_link[link_id] +=
					stats->stale_count[link_id][desc_type];
			error_count_per_link[link_id] +=
					stats->error_count[link_id][desc_type];
			parallel_rx_count_per_link[link_id] +=
				   stats->parallel_rx_count[link_id][desc_type];
		}

		total_ingress_count += ingress_count_per_link[link_id];
		total_reo_count += reo_count_per_link[link_id];
		total_stale_count += stale_count_per_link[link_id];
		total_error_count += error_count_per_link[link_id];
		total_parallel_rx_count += parallel_rx_count_per_link[link_id];
		total_missing_count += stats->missing_count[link_id];
	}

	/* Fold the same counters the other way: per-descriptor-type sums */
	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
	     desc_type++) {
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
			ingress_count_per_desc_type[desc_type] +=
				stats->ingress_count[link_id][desc_type];
			reo_count_per_desc_type[desc_type] +=
				stats->reo_count[link_id][desc_type];
			stale_count_per_desc_type[desc_type] +=
					stats->stale_count[link_id][desc_type];
			error_count_per_desc_type[desc_type] +=
					stats->error_count[link_id][desc_type];
			parallel_rx_per_desc[desc_type] +=
				stats->parallel_rx_count[link_id][desc_type];
		}
	}

	/* Per-link sums of the [link][list_type] delivery counters */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (list_type = 0; list_type < MGMT_RX_REO_LIST_TYPE_MAX;
		     list_type++) {
			queued_per_link[link_id] +=
				stats->queued_count[link_id][list_type];
			zero_wait_count_rx_per_link[link_id] +=
			    stats->zero_wait_count_rx_count[link_id][list_type];
			immediate_delivery_per_link[link_id] +=
			    stats->immediate_delivery_count[link_id][list_type];
		}

		total_queued += queued_per_link[link_id];
		total_zero_wait_count_rx +=
					zero_wait_count_rx_per_link[link_id];
		total_immediate_delivery +=
					immediate_delivery_per_link[link_id];
	}

	/* Per-list-type sums of the same delivery counters */
	for (list_type = 0; list_type < MGMT_RX_REO_LIST_TYPE_MAX;
	     list_type++) {
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
			queued_per_list[list_type] +=
				stats->queued_count[link_id][list_type];
			zero_wait_count_rx_per_list[list_type] +=
			    stats->zero_wait_count_rx_count[link_id][list_type];
			immediate_delivery_per_list[list_type] +=
			    stats->immediate_delivery_count[link_id][list_type];
		}
	}

	/* Drop counters: per-link sums plus overall total */
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		for (reason = 0; reason < MGMT_RX_REO_INGRESS_DROP_REASON_MAX;
		     reason++) {
			drop_count_per_link[link_id] +=
					stats->drop_count[link_id][reason];
		}
		total_drop_count += drop_count_per_link[link_id];
	}

	/* Drop counters folded per drop-reason */
	for (reason = 0; reason < MGMT_RX_REO_INGRESS_DROP_REASON_MAX;
	     reason++) {
		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
			drop_count_per_reason[reason] +=
					stats->drop_count[link_id][reason];
		}
	}

	/*
	 * Tables below assume 3 descriptor types (columns 0-2) and
	 * 6 drop reasons (columns 0-5); last column is the row total.
	 */
	mgmt_rx_reo_alert("Ingress Frame Stats:");
	mgmt_rx_reo_alert("\t1) Ingress Frame Count:");
	mgmt_rx_reo_alert("\tDescriptor Type Values:-");
	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_FRAME_DESC_HOST_CONSUMED_FRAME");
	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_FRAME_DESC_FW_CONSUMED_FRAME");
	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_FRAME_DESC_ERROR_FRAME");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->ingress_count[link_id][0],
				  stats->ingress_count[link_id][1],
				  stats->ingress_count[link_id][2],
				  ingress_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  ingress_count_per_desc_type[0],
			  ingress_count_per_desc_type[1],
			  ingress_count_per_desc_type[2],
			  total_ingress_count);

	mgmt_rx_reo_alert("\t2) Reo required Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->reo_count[link_id][0],
				  stats->reo_count[link_id][1],
				  stats->reo_count[link_id][2],
				  reo_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  reo_count_per_desc_type[0],
			  reo_count_per_desc_type[1],
			  reo_count_per_desc_type[2],
			  total_reo_count);

	mgmt_rx_reo_alert("\t3) Stale Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->stale_count[link_id][0],
				  stats->stale_count[link_id][1],
				  stats->stale_count[link_id][2],
				  stale_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  stale_count_per_desc_type[0],
			  stale_count_per_desc_type[1],
			  stale_count_per_desc_type[2],
			  total_stale_count);

	mgmt_rx_reo_alert("\t4) Parallel rx Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->parallel_rx_count[link_id][0],
				  stats->parallel_rx_count[link_id][1],
				  stats->parallel_rx_count[link_id][2],
				  parallel_rx_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  parallel_rx_per_desc[0], parallel_rx_per_desc[1],
			  parallel_rx_per_desc[2], total_parallel_rx_count);

	mgmt_rx_reo_alert("\t5) Error Frame Count:");
	mgmt_rx_reo_alert("\t------------------------------------");
	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
	mgmt_rx_reo_alert("\t-------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
				  stats->error_count[link_id][0],
				  stats->error_count[link_id][1],
				  stats->error_count[link_id][2],
				  error_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t-------------------------------------------");
	}
	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
			  error_count_per_desc_type[0],
			  error_count_per_desc_type[1],
			  error_count_per_desc_type[2],
			  total_error_count);

	mgmt_rx_reo_alert("\t6) Drop Frame Count:");
	mgmt_rx_reo_alert("\t--------------------------------------------");
	mgmt_rx_reo_alert("\t|link/|    |    |    |    |    |    |");
	mgmt_rx_reo_alert("\t|reas.|   0|   1|   2|   3|   4|   5|");
	mgmt_rx_reo_alert("\t--------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%5u|%4llu|%4llu|%4llu|%4llu|%4llu|%4llu|%7llu",
				  link_id, stats->drop_count[link_id][0],
				  stats->drop_count[link_id][1],
				  stats->drop_count[link_id][2],
				  stats->drop_count[link_id][3],
				  stats->drop_count[link_id][4],
				  stats->drop_count[link_id][5],
				  drop_count_per_link[link_id]);
		mgmt_rx_reo_alert("\t--------------------------------------------");
	}
	mgmt_rx_reo_alert("\t%6s|%4llu|%4llu|%4llu|%4llu|%4llu|%4llu|%7llu\n\n",
			  "", drop_count_per_reason[0],
			  drop_count_per_reason[1], drop_count_per_reason[2],
			  drop_count_per_reason[3], drop_count_per_reason[4],
			  drop_count_per_reason[5], total_drop_count);

	mgmt_rx_reo_alert("\t7) Per link stats:");
	mgmt_rx_reo_alert("\t----------------------------");
	mgmt_rx_reo_alert("\t|link id   | Missing frame |");
	mgmt_rx_reo_alert("\t|          |     count     |");
	mgmt_rx_reo_alert("\t----------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%15llu|", link_id,
				  stats->missing_count[link_id]);
		mgmt_rx_reo_alert("\t----------------------------");
	}
	mgmt_rx_reo_alert("\t%11s|%15llu|\n\n", "", total_missing_count);

	mgmt_rx_reo_alert("\t8) Host consumed frames related stats:");
	mgmt_rx_reo_alert("\tOverall:");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
	mgmt_rx_reo_alert("\t|          |    count    |  count   | delivery |");
	mgmt_rx_reo_alert("\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t|%10u|%13llu|%10llu|%10llu|", link_id,
				  queued_per_link[link_id],
				  zero_wait_count_rx_per_link[link_id],
				  immediate_delivery_per_link[link_id]);
		mgmt_rx_reo_alert("\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t%11s|%13llu|%10llu|%10llu|\n\n", "",
			  total_queued,
			  total_zero_wait_count_rx,
			  total_immediate_delivery);

	/* List index 0 below is the ingress list, index 1 the egress list */
	mgmt_rx_reo_alert("\t\ta) Ingress List:");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	mgmt_rx_reo_alert("\t\t|link id   |Queued frame |Zero wait |Immediate |");
	mgmt_rx_reo_alert("\t\t|          |    count    |  count   | delivery |");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t\t|%10u|%13llu|%10llu|%10llu|", link_id,
				  stats->queued_count[link_id][0],
				  stats->zero_wait_count_rx_count[link_id][0],
				  stats->immediate_delivery_count[link_id][0]);
		mgmt_rx_reo_alert("\t\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t\t%11s|%13llu|%10llu|%10llu|\n\n", "",
			  queued_per_list[0],
			  zero_wait_count_rx_per_list[0],
			  immediate_delivery_per_list[0]);

	mgmt_rx_reo_alert("\t\tb) Egress List:");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	mgmt_rx_reo_alert("\t\t|link id   |Queued frame |Zero wait |Immediate |");
	mgmt_rx_reo_alert("\t\t|          |    count    |  count   | delivery |");
	mgmt_rx_reo_alert("\t\t------------------------------------------------");
	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		mgmt_rx_reo_alert("\t\t|%10u|%13llu|%10llu|%10llu|", link_id,
				  stats->queued_count[link_id][1],
				  stats->zero_wait_count_rx_count[link_id][1],
				  stats->immediate_delivery_count[link_id][1]);
		mgmt_rx_reo_alert("\t\t------------------------------------------------");
	}
	mgmt_rx_reo_alert("\t\t%11s|%13llu|%10llu|%10llu|\n\n", "",
			  queued_per_list[1],
			  zero_wait_count_rx_per_list[1],
			  immediate_delivery_per_list[1]);

	mgmt_rx_reo_alert("\t9) Misc stats:");
	mgmt_rx_reo_alert("\t\tIngress list overflow count = %llu\n\n",
			  reo_ctx->ingress_list.reo_list.overflow_count);

	return QDF_STATUS_SUCCESS;
}
4824 
4825 /**
4826  * log_ingress_frame_entry() - Log the information about a frame at the start
4827  * of incoming frame processing
4828  * @reo_ctx: management rx reorder context
4829  * @desc: Pointer to frame descriptor
4830  *
4831  * Return: QDF_STATUS of operation
4832  */
4833 static QDF_STATUS
log_ingress_frame_entry(struct mgmt_rx_reo_context * reo_ctx,struct mgmt_rx_reo_frame_descriptor * desc)4834 log_ingress_frame_entry(struct mgmt_rx_reo_context *reo_ctx,
4835 			struct mgmt_rx_reo_frame_descriptor *desc)
4836 {
4837 	struct reo_ingress_debug_info *ingress_frame_debug_info;
4838 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
4839 
4840 	if (!reo_ctx || !desc)
4841 		return QDF_STATUS_E_NULL_VALUE;
4842 
4843 	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;
4844 
4845 	if (!mgmt_rx_reo_ingress_frame_debug_info_enabled
4846 						(ingress_frame_debug_info))
4847 		return QDF_STATUS_SUCCESS;
4848 
4849 	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
4850 			[ingress_frame_debug_info->next_index];
4851 
4852 	cur_frame_debug_info->link_id =
4853 				mgmt_rx_reo_get_link_id(desc->rx_params);
4854 	cur_frame_debug_info->mgmt_pkt_ctr =
4855 				mgmt_rx_reo_get_pkt_counter(desc->rx_params);
4856 	cur_frame_debug_info->global_timestamp =
4857 				mgmt_rx_reo_get_global_ts(desc->rx_params);
4858 	cur_frame_debug_info->start_timestamp =
4859 				mgmt_rx_reo_get_start_ts(desc->rx_params);
4860 	cur_frame_debug_info->end_timestamp =
4861 				mgmt_rx_reo_get_end_ts(desc->rx_params);
4862 	cur_frame_debug_info->duration_us =
4863 				mgmt_rx_reo_get_duration_us(desc->rx_params);
4864 	cur_frame_debug_info->desc_type = desc->type;
4865 	cur_frame_debug_info->frame_type = desc->frame_type;
4866 	cur_frame_debug_info->frame_subtype = desc->frame_subtype;
4867 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
4868 	cur_frame_debug_info->reo_required = desc->reo_required;
4869 
4870 	return QDF_STATUS_SUCCESS;
4871 }
4872 
4873 /**
4874  * log_ingress_frame_exit() - Log the information about a frame at the end of
4875  * incoming frame processing
4876  * @reo_ctx: management rx reorder context
4877  * @desc: Pointer to frame descriptor
4878  * @is_queued: Indicates whether this frame is queued to reorder list
4879  * @is_error: Indicates whether any error occurred during processing this frame
4880  * @context_id: context identifier
4881  * @link_id: Link ID
4882  *
4883  * Return: QDF_STATUS of operation
4884  */
static QDF_STATUS
log_ingress_frame_exit(struct mgmt_rx_reo_context *reo_ctx,
		       struct mgmt_rx_reo_frame_descriptor *desc,
		       bool is_queued, bool is_error,
		       int32_t context_id, uint8_t link_id)
{
	struct reo_ingress_debug_info *ingress_frame_debug_info;
	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
	struct reo_ingress_frame_stats *stats;
	enum mgmt_rx_reo_list_type queued_list;

	if (!reo_ctx || !desc)
		return QDF_STATUS_E_NULL_VALUE;

	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;

	/*
	 * Stats are always accounted, even when the per-frame debug ring
	 * buffer (checked further below) is disabled.
	 */
	stats = &ingress_frame_debug_info->stats;
	queued_list = desc->queued_list;
	stats->ingress_count[link_id][desc->type]++;
	if (desc->reo_required)
		stats->reo_count[link_id][desc->type]++;
	if (is_queued)
		stats->queued_count[link_id][queued_list]++;
	/*
	 * NOTE(review): the two increments below index by @queued_list
	 * without an is_queued check; presumably zero_wait_count_rx/
	 * immediate_delivery imply a valid queued list — confirm that
	 * queued_list cannot be MGMT_RX_REO_LIST_TYPE_INVALID here.
	 */
	if (desc->zero_wait_count_rx)
		stats->zero_wait_count_rx_count[link_id][queued_list]++;
	if (desc->immediate_delivery)
		stats->immediate_delivery_count[link_id][queued_list]++;
	if (is_error)
		stats->error_count[link_id][desc->type]++;
	if (desc->is_stale)
		stats->stale_count[link_id][desc->type]++;
	/* A counter jump of more than 1 means frames were missed */
	if (desc->pkt_ctr_delta > 1)
		stats->missing_count[link_id] += desc->pkt_ctr_delta - 1;
	if (desc->is_parallel_rx)
		stats->parallel_rx_count[link_id][desc->type]++;
	if (desc->drop)
		stats->drop_count[link_id][desc->drop_reason]++;

	if (!mgmt_rx_reo_ingress_frame_debug_info_enabled
						(ingress_frame_debug_info))
		return QDF_STATUS_SUCCESS;

	/*
	 * Fill the remaining fields of the ring buffer entry started by
	 * log_ingress_frame_entry() for this frame.
	 */
	cur_frame_debug_info = &ingress_frame_debug_info->frame_list
			[ingress_frame_debug_info->next_index];

	cur_frame_debug_info->wait_count = desc->wait_count;
	qdf_mem_copy(cur_frame_debug_info->shared_snapshots,
		     desc->shared_snapshots,
		     qdf_min(sizeof(cur_frame_debug_info->shared_snapshots),
			     sizeof(desc->shared_snapshots)));
	qdf_mem_copy(cur_frame_debug_info->host_snapshot, desc->host_snapshot,
		     qdf_min(sizeof(cur_frame_debug_info->host_snapshot),
			     sizeof(desc->host_snapshot)));
	cur_frame_debug_info->is_queued = is_queued;
	cur_frame_debug_info->is_stale = desc->is_stale;
	cur_frame_debug_info->is_parallel_rx = desc->is_parallel_rx;
	cur_frame_debug_info->queued_list = desc->queued_list;
	cur_frame_debug_info->zero_wait_count_rx = desc->zero_wait_count_rx;
	cur_frame_debug_info->immediate_delivery = desc->immediate_delivery;
	cur_frame_debug_info->is_error = is_error;
	cur_frame_debug_info->last_delivered_frame = desc->last_delivered_frame;
	cur_frame_debug_info->ingress_timestamp = desc->ingress_timestamp;
	/* Time this frame spent in ingress processing */
	cur_frame_debug_info->ingress_duration =
			qdf_get_log_timestamp() - desc->ingress_timestamp;
	cur_frame_debug_info->ingress_list_size_rx =
					desc->ingress_list_size_rx;
	cur_frame_debug_info->ingress_list_insertion_pos =
					desc->ingress_list_insertion_pos;
	cur_frame_debug_info->egress_list_size_rx =
					desc->egress_list_size_rx;
	cur_frame_debug_info->egress_list_insertion_pos =
					desc->egress_list_insertion_pos;
	cur_frame_debug_info->context_id = context_id;
	cur_frame_debug_info->drop = desc->drop;
	cur_frame_debug_info->drop_reason = desc->drop_reason;

	/* Advance the ring buffer; mark wrap-around once it cycles */
	ingress_frame_debug_info->next_index++;
	ingress_frame_debug_info->next_index %=
				ingress_frame_debug_info->frame_list_size;
	if (ingress_frame_debug_info->next_index == 0)
		ingress_frame_debug_info->wrap_aroud = true;

	return QDF_STATUS_SUCCESS;
}
4969 
/**
 * mgmt_rx_reo_debug_print_ingress_frame_info() - Print the debug information
 * about the latest frames entered the reorder module
 * @reo_ctx: management rx reorder context
 * @num_frames: Number of frames for which the debug information is to be
 * printed. If @num_frames is 0, then debug information about all the frames
 * in the ring buffer will be printed.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx,
					   uint16_t num_frames)
{
	struct reo_ingress_debug_info *ingress_frame_debug_info;
	int start_index;
	uint16_t index;
	uint16_t entry;
	uint16_t num_valid_entries;
	uint16_t num_entries_to_print;
	char *boarder;

	if (!reo_ctx)
		return QDF_STATUS_E_NULL_VALUE;

	ingress_frame_debug_info = &reo_ctx->ingress_frame_debug_info;

	/*
	 * Once the ring buffer has wrapped around ("wrap_aroud"), every slot
	 * holds a valid entry; otherwise only slots [0, next_index) are valid.
	 */
	if (ingress_frame_debug_info->wrap_aroud)
		num_valid_entries = ingress_frame_debug_info->frame_list_size;
	else
		num_valid_entries = ingress_frame_debug_info->next_index;

	if (num_frames == 0) {
		num_entries_to_print = num_valid_entries;

		/*
		 * Print the whole ring oldest-first; after a wrap the oldest
		 * entry lives at next_index.
		 */
		if (ingress_frame_debug_info->wrap_aroud)
			start_index = ingress_frame_debug_info->next_index;
		else
			start_index = 0;
	} else {
		num_entries_to_print = qdf_min(num_frames, num_valid_entries);

		/*
		 * Back up from next_index so that exactly the latest
		 * num_entries_to_print entries get printed (modular
		 * arithmetic keeps the index inside the ring).
		 */
		start_index = (ingress_frame_debug_info->next_index -
			       num_entries_to_print +
			       ingress_frame_debug_info->frame_list_size)
			      % ingress_frame_debug_info->frame_list_size;
	}

	mgmt_rx_reo_alert_no_fl("Ingress Frame Info:-");
	mgmt_rx_reo_alert_no_fl("num_frames = %u, wrap = %u, next_index = %u",
				num_frames,
				ingress_frame_debug_info->wrap_aroud,
				ingress_frame_debug_info->next_index);
	mgmt_rx_reo_alert_no_fl("start_index = %d num_entries_to_print = %u",
				start_index, num_entries_to_print);

	if (!num_entries_to_print)
		return QDF_STATUS_SUCCESS;

	boarder = ingress_frame_debug_info->boarder;

	mgmt_rx_reo_alert_no_fl("%s", boarder);
	mgmt_rx_reo_alert_no_fl("|%5s|%5s|%6s|%6s|%9s|%4s|%5s|%10s|%10s|%10s|%5s|%10s|%11s|%13s|%4s|%11s|%6s|%5s|%6s|%5s|%69s|%94s|%94s|%94s|%94s|%94s|%94s|",
				"Index", "CPU", "D.type", "F.type", "F.subtype",
				"Link", "SeqNo", "Global ts",
				"Start ts", "End ts", "Dur", "Last ts",
				"Ingress ts", "Flags", "List", "Ingress Dur",
				"I Size", "I Pos", "E Size",
				"E Pos", "Wait Count", "Snapshot : link 0",
				"Snapshot : link 1", "Snapshot : link 2",
				"Snapshot : link 3", "Snapshot : link 4",
				"Snapshot : link 5");
	mgmt_rx_reo_alert_no_fl("%s", boarder);

	index = start_index;
	for (entry = 0; entry < num_entries_to_print; entry++) {
		struct reo_ingress_debug_frame_info *info;
		char flags[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_FLAG_MAX_SIZE + 1] = {0};
		char wait_count[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_WAIT_COUNT_MAX_SIZE + 1] = {0};
		char snapshots[MAX_MLO_LINKS][MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PER_LINK_SNAPSHOTS_MAX_SIZE + 1] = {0};
		char flag_queued = ' ';
		char flag_stale = ' ';
		char flag_parallel_rx = ' ';
		char flag_error = ' ';
		char flag_zero_wait_count_rx = ' ';
		char flag_immediate_delivery = ' ';
		char flag_reo_required = ' ';
		int64_t ts_last_delivered_frame = -1;
		uint8_t link;

		info = &reo_ctx->ingress_frame_debug_info.frame_list[index];

		/* -1 in the "Last ts" column means no frame delivered yet */
		if (info->last_delivered_frame.valid) {
			struct mgmt_rx_reo_params *reo_params;

			reo_params = &info->last_delivered_frame.reo_params;
			ts_last_delivered_frame = reo_params->global_timestamp;
		}

		/*
		 * Flag legend: E = error, S = stale, P = parallel rx,
		 * Q = queued, Z = zero wait count rx, I = immediate delivery,
		 * N = reorder not required.
		 */
		if (info->is_queued)
			flag_queued = 'Q';

		if (info->is_stale)
			flag_stale = 'S';

		if (info->is_parallel_rx)
			flag_parallel_rx = 'P';

		if (info->is_error)
			flag_error = 'E';

		if (info->zero_wait_count_rx)
			flag_zero_wait_count_rx = 'Z';

		if (info->immediate_delivery)
			flag_immediate_delivery = 'I';

		if (!info->reo_required)
			flag_reo_required = 'N';

		snprintf(flags, sizeof(flags), "%c %c %c %c %c %c %c",flag_error,
			 flag_stale, flag_parallel_rx, flag_queued,
			 flag_zero_wait_count_rx, flag_immediate_delivery,
			 flag_reo_required);
		snprintf(wait_count, sizeof(wait_count),
			 "%9llx(%8x, %8x, %8x, %8x, %8x, %8x)",
			 info->wait_count.total_count,
			 info->wait_count.per_link_count[0],
			 info->wait_count.per_link_count[1],
			 info->wait_count.per_link_count[2],
			 info->wait_count.per_link_count[3],
			 info->wait_count.per_link_count[4],
			 info->wait_count.per_link_count[5]);

		/*
		 * Each per-link snapshot is rendered as a
		 * (valid, mgmt_pkt_ctr, global_timestamp) tuple for the
		 * MAC HW, FW-consumed, FW-forwarded and host stages.
		 */
		for (link = 0; link < MAX_MLO_LINKS; link++) {
			char mac_hw[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_consumed[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char fw_forwarded[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			char host[MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_SNAPSHOT_MAX_SIZE + 1] = {'\0'};
			struct mgmt_rx_reo_snapshot_params *mac_hw_ss;
			struct mgmt_rx_reo_snapshot_params *fw_consumed_ss;
			struct mgmt_rx_reo_snapshot_params *fw_forwarded_ss;
			struct mgmt_rx_reo_snapshot_params *host_ss;

			mac_hw_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW];
			fw_consumed_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED];
			fw_forwarded_ss = &info->shared_snapshots
				[link][MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED];
			host_ss = &info->host_snapshot[link];

			snprintf(mac_hw, sizeof(mac_hw), "(%1u, %5u, %10u)",
				 mac_hw_ss->valid, mac_hw_ss->mgmt_pkt_ctr,
				 mac_hw_ss->global_timestamp);
			snprintf(fw_consumed, sizeof(fw_consumed),
				 "(%1u, %5u, %10u)",
				 fw_consumed_ss->valid,
				 fw_consumed_ss->mgmt_pkt_ctr,
				 fw_consumed_ss->global_timestamp);
			snprintf(fw_forwarded, sizeof(fw_forwarded),
				 "(%1u, %5u, %10u)",
				 fw_forwarded_ss->valid,
				 fw_forwarded_ss->mgmt_pkt_ctr,
				 fw_forwarded_ss->global_timestamp);
			snprintf(host, sizeof(host), "(%1u, %5u, %10u)",
				 host_ss->valid,
				 host_ss->mgmt_pkt_ctr,
				 host_ss->global_timestamp);
			snprintf(snapshots[link], sizeof(snapshots[link]),
				 "%22s, %22s, %22s, %22s", mac_hw, fw_consumed,
				 fw_forwarded, host);
		}

		mgmt_rx_reo_alert_no_fl("|%5u|%5d|%6u|%6x|%9x|%4u|%5u|%10u|%10u|%10u|%5u|%10lld|%11llu|%11s|%4u|%11llu|%6d|%5d|%6d|%5d|%69s|%70s|%70s|%70s|%70s|%70s|%70s|",
					entry, info->cpu_id, info->desc_type,
					info->frame_type, info->frame_subtype,
					info->link_id,
					info->mgmt_pkt_ctr,
					info->global_timestamp,
					info->start_timestamp,
					info->end_timestamp,
					info->duration_us,
					ts_last_delivered_frame,
					info->ingress_timestamp, flags,
					info->queued_list,
					info->ingress_duration,
					info->ingress_list_size_rx,
					info->ingress_list_insertion_pos,
					info->egress_list_size_rx,
					info->egress_list_insertion_pos,
					wait_count,
					snapshots[0], snapshots[1],
					snapshots[2], snapshots[3],
					snapshots[4], snapshots[5]);
		mgmt_rx_reo_alert_no_fl("%s", boarder);

		/* Advance circularly through the ring buffer */
		index++;
		index %= ingress_frame_debug_info->frame_list_size;
	}

	return QDF_STATUS_SUCCESS;
}
5173 #else
/**
 * mgmt_rx_reo_debug_print_ingress_frame_stats() - API to print the stats
 * related to frames going into the reorder module
 * @reo_ctx: Pointer to reorder context
 *
 * API to print the stats related to frames going into the management
 * Rx reorder module. No-op stub used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT
 * is disabled.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
{
	return QDF_STATUS_SUCCESS;
}
5189 
/**
 * log_ingress_frame_entry() - Log the information about a frame at the start
 * of incoming frame processing
 * @reo_ctx: management rx reorder context
 * @desc: Pointer to frame descriptor
 *
 * No-op stub used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is disabled.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
log_ingress_frame_entry(struct mgmt_rx_reo_context *reo_ctx,
			struct mgmt_rx_reo_frame_descriptor *desc)
{
	return QDF_STATUS_SUCCESS;
}
5204 
/**
 * log_ingress_frame_exit() - Log the information about a frame at the end of
 * incoming frame processing
 * @reo_ctx: management rx reorder context
 * @desc: Pointer to frame descriptor
 * @is_queued: Indicates whether this frame is queued to reorder list
 * @is_error: Indicates whether any error occurred during processing this frame
 * @context_id: context identifier
 * @link_id: Link ID
 *
 * No-op stub used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is disabled.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
log_ingress_frame_exit(struct mgmt_rx_reo_context *reo_ctx,
		       struct mgmt_rx_reo_frame_descriptor *desc,
		       bool is_queued, bool is_error,
		       int32_t context_id, uint8_t link_id)
{
	return QDF_STATUS_SUCCESS;
}
5225 
/**
 * mgmt_rx_reo_debug_print_ingress_frame_info() - Print debug information about
 * the latest frames entering the reorder module
 * @reo_ctx: management rx reorder context
 *
 * No-op stub used when WLAN_MGMT_RX_REO_DEBUG_SUPPORT is disabled.
 *
 * NOTE(review): the debug-enabled variant of this function takes an extra
 * @num_frames parameter; confirm that all callers in the non-debug build
 * expect this single-parameter signature.
 *
 * Return: QDF_STATUS of operation
 */
static QDF_STATUS
mgmt_rx_reo_debug_print_ingress_frame_info(struct mgmt_rx_reo_context *reo_ctx)
{
	return QDF_STATUS_SUCCESS;
}
5238 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
5239 
/**
 * wlan_mgmt_rx_reo_algo_entry() - Entry point to the management Rx reorder
 * algorithm for a frame/event received on a pdev
 * @pdev: pdev on which the frame/event arrived
 * @desc: frame descriptor of the incoming frame/event
 * @is_queued: output; set to true if the frame got queued to the reorder list
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS
wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
			    struct mgmt_rx_reo_frame_descriptor *desc,
			    bool *is_queued)
{
	struct mgmt_rx_reo_context *reo_ctx;
	struct mgmt_rx_reo_ingress_list *ingress_list;
	struct mgmt_rx_reo_egress_list *egress_list;
	QDF_STATUS ret;
	int16_t cur_link;
	struct mgmt_rx_reo_context_info ctx_info = {0};
	int32_t context_id = 0;

	if (!is_queued) {
		mgmt_rx_reo_err("Pointer to queued indication is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	/* Default to "not queued"; mgmt_rx_reo_update_lists() may set it */
	*is_queued = false;

	if (!desc || !desc->rx_params) {
		mgmt_rx_reo_err("MGMT Rx REO descriptor or rx params are null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_ctx = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
	if (!reo_ctx) {
		mgmt_rx_reo_err("REO context is NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}
	ingress_list = &reo_ctx->ingress_list;
	egress_list = &reo_ctx->egress_list;

	/**
	 * Critical Section = Host snapshot update + Calculation of wait
	 * counts + Update reorder list. Following section describes the
	 * motivation for making this a critical section.
	 * Lets take an example of 2 links (Link A & B) and each has received
	 * a management frame A1 and B1 such that MLO global time stamp of A1 <
	 * MLO global time stamp of B1. Host is concurrently executing
	 * "wlan_mgmt_rx_reo_algo_entry" for A1 and B1 in 2 different CPUs.
	 *
	 * A lock less version of this API("wlan_mgmt_rx_reo_algo_entry_v1") is
	 * as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     Host snapshot update
	 *     Calculation of wait counts
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * We may run into race conditions under the following sequence of
	 * operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1
	 * 2. Host snapshot update for link B in context of frame B1
	 * 3. Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 6. Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This leads to incorrect behaviour as B1 goes to upper layer before
	 * A1.
	 *
	 * To prevent this lets make Host snapshot update + Calculate wait count
	 * a critical section by adding locks. The updated version of the API
	 * ("wlan_mgmt_rx_reo_algo_entry_v2") is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *     UNLOCK
	 *     Update reorder list
	 *     Release to upper layer
	 * }
	 *
	 * With this API also We may run into race conditions under the
	 * following sequence of operations.
	 *
	 * 1. Host snapshot update for link A in context of frame A1 +
	 *    Calculation of wait count for frame A1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 2. Host snapshot update for link B in context of frame B1 +
	 *    Calculation of wait count for frame B1
	 *        link A wait count =  0
	 *        link B wait count =  0
	 * 4. Update reorder list with frame B1
	 * 5. Release B1 to upper layer
	 * 7. Update reorder list with frame A1
	 * 8. Release A1 to upper layer
	 *
	 * This also leads to incorrect behaviour as B1 goes to upper layer
	 * before A1.
	 *
	 * To prevent this, let's make Host snapshot update + Calculate wait
	 * count + Update reorder list a critical section by adding locks.
	 * The updated version of the API ("wlan_mgmt_rx_reo_algo_entry_final")
	 * is as follows.
	 *
	 * wlan_mgmt_rx_reo_algo_entry()
	 * {
	 *     LOCK
	 *         Host snapshot update
	 *         Calculation of wait counts
	 *         Update reorder list
	 *     UNLOCK
	 *     Release to upper layer
	 * }
	 */
	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);

	cur_link = mgmt_rx_reo_get_link_id(desc->rx_params);
	if (desc->frame_type != IEEE80211_FC0_TYPE_MGT) {
		ret = QDF_STATUS_E_INVAL;
		goto failure;
	}

	ret = log_ingress_frame_entry(reo_ctx, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	ret = check_frame_sanity(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/* Update the Host snapshot */
	ret = wlan_mgmt_rx_reo_update_host_snapshot(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	/*
	 * A frame marked for drop is not reordered; exit via the common
	 * failure path with the last status (success from the snapshot
	 * update above).
	 */
	if (desc->drop)
		goto failure;

	/* Compute wait count for this frame/event */
	ret = wlan_mgmt_rx_reo_algo_calculate_wait_count(pdev, desc);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	ctx_info.in_reo_params = *desc->rx_params->reo_params;
	/* Update ingress and egress list */
	ret = mgmt_rx_reo_update_lists(ingress_list, egress_list, desc,
				       is_queued);
	if (QDF_IS_STATUS_ERROR(ret))
		goto failure;

	context_id = qdf_atomic_inc_return(&reo_ctx->context_id);
	ret = log_ingress_frame_exit(reo_ctx, desc, *is_queued,
				     false, context_id, cur_link);
	if (QDF_IS_STATUS_ERROR(ret)) {
		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
		return ret;
	}

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	ret = mgmt_rx_reo_move_entries_ingress_to_egress_list(ingress_list,
							      egress_list);
	if (QDF_IS_STATUS_ERROR(ret))
		return ret;

	ctx_info.context = MGMT_RX_REO_CONTEXT_MGMT_RX;
	ctx_info.context_id = context_id;

	/* Finally, release the entries for which pending frame is received */
	return mgmt_rx_reo_release_egress_list_entries(reo_ctx, 1 << cur_link,
						       &ctx_info);

failure:
	/**
	 * Ignore the return value of this function call, return
	 * the actual reason for failure.
	 */
	log_ingress_frame_exit(reo_ctx, desc, *is_queued, true,
			       context_id, cur_link);

	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);

	return ret;
}
5430 
5431 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
/**
 * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
 * context.
 * @reo_context: Pointer to reo context
 *
 * No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is disabled.
 *
 * Return: QDF_STATUS of operation
 */
static inline QDF_STATUS
mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
{
	return QDF_STATUS_SUCCESS;
}
5444 
/**
 * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
 * context.
 * @reo_context: Pointer to reo context
 *
 * No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is disabled.
 *
 * Return: QDF_STATUS of operation
 */
static inline QDF_STATUS
mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
{
	return QDF_STATUS_SUCCESS;
}
5457 
/* No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is disabled */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5463 
/* No-op stub used when WLAN_MGMT_RX_REO_SIM_SUPPORT is disabled */
QDF_STATUS
mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5469 #else
5470 /**
5471  * mgmt_rx_reo_sim_remove_frame_from_master_list() - Removes frame from the
5472  * master management frame list
5473  * @master_frame_list: pointer to master management frame list
5474  * @frame: pointer to management frame parameters
5475  *
5476  * This API removes frames from the master management frame list. This API is
5477  * used in case of FW consumed management frames or management frames which
5478  * are dropped at host due to any error.
5479  *
5480  * Return: QDF_STATUS of operation
5481  */
5482 static QDF_STATUS
mgmt_rx_reo_sim_remove_frame_from_master_list(struct mgmt_rx_reo_master_frame_list * master_frame_list,const struct mgmt_rx_frame_params * frame)5483 mgmt_rx_reo_sim_remove_frame_from_master_list(
5484 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
5485 		const struct mgmt_rx_frame_params *frame)
5486 {
5487 	struct mgmt_rx_reo_pending_frame_list_entry *pending_entry;
5488 	struct mgmt_rx_reo_pending_frame_list_entry *matching_pend_entry = NULL;
5489 	struct mgmt_rx_reo_stale_frame_list_entry *stale_entry;
5490 	struct mgmt_rx_reo_stale_frame_list_entry *matching_stale_entry = NULL;
5491 	QDF_STATUS status;
5492 
5493 	if (!master_frame_list) {
5494 		mgmt_rx_reo_err("Mgmt master frame list is null");
5495 		return QDF_STATUS_E_NULL_VALUE;
5496 	}
5497 
5498 	if (!frame) {
5499 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
5500 		return QDF_STATUS_E_NULL_VALUE;
5501 	}
5502 
5503 	qdf_spin_lock(&master_frame_list->lock);
5504 
5505 	qdf_list_for_each(&master_frame_list->pending_list, pending_entry,
5506 			  node) {
5507 		if (pending_entry->params.link_id == frame->link_id &&
5508 		    pending_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
5509 		    pending_entry->params.global_timestamp ==
5510 		    frame->global_timestamp) {
5511 			matching_pend_entry = pending_entry;
5512 			break;
5513 		}
5514 	}
5515 
5516 	qdf_list_for_each(&master_frame_list->stale_list, stale_entry, node) {
5517 		if (stale_entry->params.link_id == frame->link_id &&
5518 		    stale_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
5519 		    stale_entry->params.global_timestamp ==
5520 		    frame->global_timestamp) {
5521 			matching_stale_entry = stale_entry;
5522 			break;
5523 		}
5524 	}
5525 
5526 	/* Found in pending and stale list. Duplicate entries, assert */
5527 	qdf_assert_always(!matching_pend_entry || !matching_stale_entry);
5528 
5529 	if (!matching_pend_entry && !matching_stale_entry) {
5530 		qdf_spin_unlock(&master_frame_list->lock);
5531 		mgmt_rx_reo_err("No matching frame in pend/stale list");
5532 		return QDF_STATUS_E_FAILURE;
5533 	}
5534 
5535 	if (matching_pend_entry) {
5536 		status = qdf_list_remove_node(&master_frame_list->pending_list,
5537 					      &matching_pend_entry->node);
5538 		if (QDF_IS_STATUS_ERROR(status)) {
5539 			qdf_spin_unlock(&master_frame_list->lock);
5540 			mgmt_rx_reo_err("Failed to remove the matching entry");
5541 			return status;
5542 		}
5543 
5544 		qdf_mem_free(matching_pend_entry);
5545 	}
5546 
5547 	if (matching_stale_entry) {
5548 		status = qdf_list_remove_node(&master_frame_list->stale_list,
5549 					      &matching_stale_entry->node);
5550 		if (QDF_IS_STATUS_ERROR(status)) {
5551 			qdf_spin_unlock(&master_frame_list->lock);
5552 			mgmt_rx_reo_err("Failed to remove the matching entry");
5553 			return status;
5554 		}
5555 
5556 		qdf_mem_free(matching_stale_entry);
5557 	}
5558 
5559 	qdf_spin_unlock(&master_frame_list->lock);
5560 
5561 	return QDF_STATUS_SUCCESS;
5562 }
5563 
5564 /**
5565  * mgmt_rx_reo_sim_remove_frame_from_pending_list() - Removes frame from the
5566  * pending management frame list
5567  * @master_frame_list: pointer to master management frame list
5568  * @frame: pointer to management frame parameters
5569  *
5570  * This API removes frames from the pending management frame list. This API is
5571  * used in case of FW consumed management frames or management frames which
5572  * are dropped at host due to any error.
5573  *
5574  * Return: QDF_STATUS of operation
5575  */
5576 static QDF_STATUS
mgmt_rx_reo_sim_remove_frame_from_pending_list(struct mgmt_rx_reo_master_frame_list * master_frame_list,const struct mgmt_rx_frame_params * frame)5577 mgmt_rx_reo_sim_remove_frame_from_pending_list(
5578 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
5579 		const struct mgmt_rx_frame_params *frame)
5580 {
5581 	struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
5582 	struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;
5583 	QDF_STATUS status;
5584 
5585 	if (!master_frame_list) {
5586 		mgmt_rx_reo_err("Mgmt master frame list is null");
5587 		return QDF_STATUS_E_NULL_VALUE;
5588 	}
5589 
5590 	if (!frame) {
5591 		mgmt_rx_reo_err("Pointer to mgmt frame params is null");
5592 		return QDF_STATUS_E_NULL_VALUE;
5593 	}
5594 
5595 	qdf_spin_lock(&master_frame_list->lock);
5596 
5597 	qdf_list_for_each(&master_frame_list->pending_list, cur_entry, node) {
5598 		if (cur_entry->params.link_id == frame->link_id &&
5599 		    cur_entry->params.mgmt_pkt_ctr == frame->mgmt_pkt_ctr &&
5600 		    cur_entry->params.global_timestamp ==
5601 		    frame->global_timestamp) {
5602 			matching_entry = cur_entry;
5603 			break;
5604 		}
5605 	}
5606 
5607 	if (!matching_entry) {
5608 		qdf_spin_unlock(&master_frame_list->lock);
5609 		mgmt_rx_reo_err("No matching frame in the pend list to remove");
5610 		return QDF_STATUS_E_FAILURE;
5611 	}
5612 
5613 	status = qdf_list_remove_node(&master_frame_list->pending_list,
5614 				      &matching_entry->node);
5615 	if (QDF_IS_STATUS_ERROR(status)) {
5616 		qdf_spin_unlock(&master_frame_list->lock);
5617 		mgmt_rx_reo_err("Failed to remove the matching entry");
5618 		return status;
5619 	}
5620 
5621 	qdf_mem_free(matching_entry);
5622 
5623 	qdf_spin_unlock(&master_frame_list->lock);
5624 
5625 
5626 	return QDF_STATUS_SUCCESS;
5627 }
5628 
5629 /**
5630  * mgmt_rx_reo_sim_add_frame_to_pending_list() - Inserts frame to the
5631  * pending management frame list
5632  * @master_frame_list: pointer to master management frame list
5633  * @frame: pointer to management frame parameters
5634  *
5635  * This API inserts frames to the pending management frame list. This API is
5636  * used to insert frames generated by the MAC HW to the pending frame list.
5637  *
5638  * Return: QDF_STATUS of operation
5639  */
5640 static QDF_STATUS
mgmt_rx_reo_sim_add_frame_to_pending_list(struct mgmt_rx_reo_master_frame_list * master_frame_list,const struct mgmt_rx_frame_params * frame)5641 mgmt_rx_reo_sim_add_frame_to_pending_list(
5642 		struct mgmt_rx_reo_master_frame_list *master_frame_list,
5643 		const struct mgmt_rx_frame_params *frame)
5644 {
5645 	struct mgmt_rx_reo_pending_frame_list_entry *new_entry;
5646 	QDF_STATUS status;
5647 
5648 	if (!master_frame_list) {
5649 		mgmt_rx_reo_err("Mgmt master frame list is null");
5650 		return QDF_STATUS_E_NULL_VALUE;
5651 	}
5652 
5653 	if (!frame) {
5654 		mgmt_rx_reo_err("Pointer mgmt frame params is null");
5655 		return QDF_STATUS_E_NULL_VALUE;
5656 	}
5657 
5658 	new_entry = qdf_mem_malloc(sizeof(*new_entry));
5659 	if (!new_entry) {
5660 		mgmt_rx_reo_err("Failed to allocate new entry to frame list");
5661 		return QDF_STATUS_E_NOMEM;
5662 	}
5663 
5664 	new_entry->params = *frame;
5665 
5666 	qdf_spin_lock(&master_frame_list->lock);
5667 
5668 	status = qdf_list_insert_back(&master_frame_list->pending_list,
5669 				      &new_entry->node);
5670 
5671 	qdf_spin_unlock(&master_frame_list->lock);
5672 
5673 	if (QDF_IS_STATUS_ERROR(status)) {
5674 		mgmt_rx_reo_err("Failed to add frame to pending list");
5675 		qdf_mem_free(new_entry);
5676 		return status;
5677 	}
5678 
5679 	return QDF_STATUS_SUCCESS;
5680 }
5681 
/**
 * mgmt_rx_reo_sim_process_rx_frame() - Validate the reorder output in
 * simulation by checking the delivered frame against the master frame list
 * @pdev: pdev on which the frame was received
 * @buf: nbuf holding the management frame
 * @mgmt_rx_params: management rx event parameters
 *
 * Return: QDF_STATUS of operation
 */
QDF_STATUS
mgmt_rx_reo_sim_process_rx_frame(struct wlan_objmgr_pdev *pdev, qdf_nbuf_t buf,
				 struct mgmt_rx_event_params *mgmt_rx_params)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	QDF_STATUS status;
	struct mgmt_rx_reo_params *reo_params;

	if (!mgmt_rx_params) {
		mgmt_rx_reo_err("Mgmt rx params null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	reo_params = mgmt_rx_params->reo_params;

	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
	if (!reo_context) {
		mgmt_rx_reo_err("Mgmt reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	qdf_spin_lock(&sim_context->master_frame_list.lock);

	if (qdf_list_empty(&sim_context->master_frame_list.pending_list)) {
		/*
		 * NOTE(review): this branch unlocks here and the common path
		 * below unlocks again; it relies on qdf_assert_always()
		 * aborting execution — confirm the assert is fatal in all
		 * build configurations.
		 */
		qdf_spin_unlock(&sim_context->master_frame_list.lock);
		mgmt_rx_reo_err("reo sim failure: pending frame list is empty");
		qdf_assert_always(0);
	} else {
		struct mgmt_rx_frame_params *cur_entry_params;
		struct mgmt_rx_reo_pending_frame_list_entry *cur_entry;
		struct mgmt_rx_reo_pending_frame_list_entry *matching_entry = NULL;

		/**
		 * Make sure the frames delivered to upper layer are in the
		 * increasing order of global time stamp. For that the frame
		 * which is being delivered should be present at the head of the
		 * pending frame list. There could be multiple frames with the
		 * same global time stamp in the pending frame list. Search
		 * among all the frames at the head of the list which has the
		 * same global time stamp as the frame which is being delivered.
		 * To find matching frame, check whether packet counter,
		 * global time stamp and link id are same.
		 */
		qdf_list_for_each(&sim_context->master_frame_list.pending_list,
				  cur_entry, node) {
			cur_entry_params = &cur_entry->params;

			if (cur_entry_params->global_timestamp !=
			    reo_params->global_timestamp)
				break;

			if (cur_entry_params->link_id == reo_params->link_id &&
			    cur_entry_params->mgmt_pkt_ctr ==
			    reo_params->mgmt_pkt_ctr) {
				matching_entry = cur_entry;
				break;
			}
		}

		if (!matching_entry) {
			/*
			 * NOTE(review): if qdf_assert_always() returns,
			 * matching_entry is still NULL and the remove below
			 * would dereference it — relies on the assert being
			 * fatal; confirm.
			 */
			qdf_spin_unlock(&sim_context->master_frame_list.lock);
			mgmt_rx_reo_err("reo sim failure: mismatch");
			qdf_assert_always(0);
		}

		status = qdf_list_remove_node(
				&sim_context->master_frame_list.pending_list,
				&matching_entry->node);
		qdf_mem_free(matching_entry);

		if (QDF_IS_STATUS_ERROR(status)) {
			qdf_spin_unlock(&sim_context->master_frame_list.lock);
			mgmt_rx_reo_err("Failed to remove matching entry");
			return status;
		}
	}

	qdf_spin_unlock(&sim_context->master_frame_list.lock);

	mgmt_rx_reo_debug("Successfully processed mgmt frame");
	mgmt_rx_reo_debug("link_id = %u, ctr = %u, ts = %u",
			  reo_params->link_id, reo_params->mgmt_pkt_ctr,
			  reo_params->global_timestamp);

	return QDF_STATUS_SUCCESS;
}
5771 
5772 /**
5773  * mgmt_rx_reo_sim_get_random_bool() - Generate true/false randomly
5774  * @percentage_true: probability (in percentage) of true
5775  *
5776  * API to generate true with probability @percentage_true % and false with
5777  * probability (100 - @percentage_true) %.
5778  *
5779  * Return: true with probability @percentage_true % and false with probability
5780  * (100 - @percentage_true) %
5781  */
5782 static bool
mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)5783 mgmt_rx_reo_sim_get_random_bool(uint8_t percentage_true)
5784 {
5785 	uint32_t rand;
5786 
5787 	if (percentage_true > 100) {
5788 		mgmt_rx_reo_err("Invalid probability value for true, %u",
5789 				percentage_true);
5790 		return -EINVAL;
5791 	}
5792 
5793 	get_random_bytes(&rand, sizeof(rand));
5794 
5795 	return ((rand % 100) < percentage_true);
5796 }
5797 
5798 /**
5799  * mgmt_rx_reo_sim_get_random_unsigned_int() - Generate random unsigned integer
5800  * value in the range [0, max)
5801  * @max: upper limit for the output
5802  *
5803  * API to generate random unsigned integer value in the range [0, max).
5804  *
5805  * Return: unsigned integer value in the range [0, max)
5806  */
5807 static uint32_t
mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)5808 mgmt_rx_reo_sim_get_random_unsigned_int(uint32_t max)
5809 {
5810 	uint32_t rand;
5811 
5812 	get_random_bytes(&rand, sizeof(rand));
5813 
5814 	return (rand % max);
5815 }
5816 
/**
 * mgmt_rx_reo_sim_sleep() - Wrapper API to sleep for given micro seconds
 * @sleeptime_us: Sleep time in micro seconds
 *
 * This API uses msleep() internally. So the granularity is limited to
 * milliseconds. Note: values below one millisecond truncate to msleep(0).
 *
 * Return: none
 */
static void
mgmt_rx_reo_sim_sleep(uint32_t sleeptime_us)
{
	msleep(sleeptime_us / USEC_PER_MSEC);
}
5831 
/**
 * mgmt_rx_reo_sim_frame_handler_host() - Management frame handler at the host
 * layer
 * @arg: Argument (pointer to struct mgmt_rx_frame_fw; this handler owns it
 * and frees it on every exit path)
 *
 * This API handles the management frame at the host layer. This is applicable
 * for simulation alone. It emulates the FW-to-host delivery delay, randomly
 * tags a configured percentage of non-FW-consumed frames as error frames,
 * and then feeds the frame into the appropriate rx reorder entry point.
 *
 * Return: none
 */
static void
mgmt_rx_reo_sim_frame_handler_host(void *arg)
{
	struct mgmt_rx_frame_fw *frame_fw = (struct mgmt_rx_frame_fw *)arg;
	uint32_t fw_to_host_delay_us;
	bool is_error_frame = false;
	int8_t link_id = -1;
	struct mgmt_rx_event_params *rx_params;
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct wlan_objmgr_pdev *pdev;
	uint8_t ml_grp_id;

	if (!frame_fw) {
		mgmt_rx_reo_err("HOST-%d : Pointer to FW frame struct is null",
				link_id);
		goto error_print;
	}

	link_id = frame_fw->params.link_id;

	sim_context = frame_fw->sim_context;
	if (!sim_context) {
		mgmt_rx_reo_err("HOST-%d : Mgmt rx reo simulation context null",
				link_id);
		goto error_free_fw_frame;
	}

	ml_grp_id = sim_context->mlo_grp_id;

	/* Emulate a random FW-to-host delivery latency */
	fw_to_host_delay_us = MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN +
			      mgmt_rx_reo_sim_get_random_unsigned_int(
			      MGMT_RX_REO_SIM_DELAY_FW_TO_HOST_MIN_MAX_DELTA);

	mgmt_rx_reo_sim_sleep(fw_to_host_delay_us);

	if (!frame_fw->is_consumed_by_fw) {
		is_error_frame = mgmt_rx_reo_sim_get_random_bool(
				 MGMT_RX_REO_SIM_PERCENTAGE_ERROR_FRAMES);

		/**
		 * This frame should be present in pending/stale list of the
		 * master frame list. Error frames need not be reordered
		 * by reorder algorithm. It is just used for book
		 * keeping purposes. Hence remove it from the master list.
		 */
		if (is_error_frame) {
			status = mgmt_rx_reo_sim_remove_frame_from_master_list(
					&sim_context->master_frame_list,
					&frame_fw->params);

			if (QDF_IS_STATUS_ERROR(status)) {
				mgmt_rx_reo_err("HOST-%d : Failed to remove error frame",
						link_id);
				qdf_assert_always(0);
			}
		}
	}

	mgmt_rx_reo_debug("HOST-%d : Received frame with ts = %u, ctr = %u, consume = %u, error = %u",
			  link_id, frame_fw->params.global_timestamp,
			  frame_fw->params.mgmt_pkt_ctr,
			  frame_fw->is_consumed_by_fw, is_error_frame);

	rx_params = alloc_mgmt_rx_event_params();
	if (!rx_params) {
		mgmt_rx_reo_err("HOST-%d : Failed to allocate event params",
				link_id);
		goto error_free_fw_frame;
	}

	/* Copy the simulated frame's reorder parameters into the event */
	rx_params->reo_params->link_id = frame_fw->params.link_id;
	rx_params->reo_params->global_timestamp =
					frame_fw->params.global_timestamp;
	rx_params->reo_params->mgmt_pkt_ctr = frame_fw->params.mgmt_pkt_ctr;
	rx_params->reo_params->valid = true;

	pdev = wlan_get_pdev_from_mlo_link_id(
			link_id, ml_grp_id, WLAN_MGMT_RX_REO_SIM_ID);
	if (!pdev) {
		mgmt_rx_reo_err("No pdev corresponding to link_id %d", link_id);
		goto error_free_mgmt_rx_event_params;
	}

	/*
	 * Dispatch to the reorder module. In the drop/FW-consumed cases
	 * rx_params is freed here; in the normal path rx_params is handed
	 * to tgt_mgmt_rx_reo_frame_handler and is presumably released by
	 * the reorder module — NOTE(review): confirm ownership transfer.
	 */
	if (is_error_frame) {
		status = tgt_mgmt_rx_reo_host_drop_handler(
						pdev, rx_params->reo_params);
		free_mgmt_rx_event_params(rx_params);
	} else if (frame_fw->is_consumed_by_fw) {
		status = tgt_mgmt_rx_reo_fw_consumed_event_handler(
						pdev, rx_params->reo_params);
		free_mgmt_rx_event_params(rx_params);
	} else {
		status = tgt_mgmt_rx_reo_frame_handler(pdev, NULL, rx_params);
	}

	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to execute reo algorithm");
		goto error_free_fw_frame;
	}

	qdf_mem_free(frame_fw);

	return;

	/* Error labels fall through intentionally: each stage frees its own
	 * resource and then the resources acquired before it.
	 */
error_free_mgmt_rx_event_params:
	free_mgmt_rx_event_params(rx_params);
error_free_fw_frame:
	qdf_mem_free(frame_fw);
error_print:
	mgmt_rx_reo_err("HOST-%d : Exiting host frame handler due to error",
			link_id);
}
5957 
5958 /**
5959  * mgmt_rx_reo_sim_write_snapshot() - API to write snapshots used for management
5960  * frame reordering
5961  * @link_id: link id
5962  * @id: snapshot id
5963  * @value: snapshot value
5964  * @ml_grp_id: MLO group id which it belongs to
5965  *
5966  * This API writes the snapshots used for management frame reordering. MAC HW
5967  * and FW can use this API to update the MAC HW/FW consumed/FW forwarded
5968  * snapshots.
5969  *
5970  * Return: QDF_STATUS
5971  */
5972 static QDF_STATUS
mgmt_rx_reo_sim_write_snapshot(uint8_t link_id,uint8_t ml_grp_id,enum mgmt_rx_reo_shared_snapshot_id id,struct mgmt_rx_reo_shared_snapshot value)5973 mgmt_rx_reo_sim_write_snapshot(uint8_t link_id, uint8_t ml_grp_id,
5974 			       enum mgmt_rx_reo_shared_snapshot_id id,
5975 			       struct mgmt_rx_reo_shared_snapshot value)
5976 {
5977 	struct wlan_objmgr_pdev *pdev;
5978 	struct mgmt_rx_reo_shared_snapshot *snapshot_address;
5979 	QDF_STATUS status;
5980 
5981 	pdev = wlan_get_pdev_from_mlo_link_id(
5982 			link_id, ml_grp_id,
5983 			WLAN_MGMT_RX_REO_SIM_ID);
5984 
5985 	if (!pdev) {
5986 		mgmt_rx_reo_err("pdev is null");
5987 		return QDF_STATUS_E_NULL_VALUE;
5988 	}
5989 
5990 	status = mgmt_rx_reo_sim_get_snapshot_address(pdev, id,
5991 						      &snapshot_address);
5992 
5993 	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_SIM_ID);
5994 
5995 	if (QDF_IS_STATUS_ERROR(status)) {
5996 		mgmt_rx_reo_err("Failed to get snapshot address %d of pdev %pK",
5997 				id, pdev);
5998 		return QDF_STATUS_E_FAILURE;
5999 	}
6000 
6001 	snapshot_address->mgmt_rx_reo_snapshot_low =
6002 						value.mgmt_rx_reo_snapshot_low;
6003 	snapshot_address->mgmt_rx_reo_snapshot_high =
6004 						value.mgmt_rx_reo_snapshot_high;
6005 
6006 	return QDF_STATUS_SUCCESS;
6007 }
6008 
6009 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS                       (0)
6010 #define MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE                      (1)
6011 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS                (1)
6012 #define MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE               (16)
6013 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS            (17)
6014 #define MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE           (15)
6015 
6016 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS           (0)
6017 #define MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE          (17)
6018 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS     (17)
6019 #define MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE    (15)
6020 
6021 /**
6022  * mgmt_rx_reo_sim_get_snapshot_value() - API to get snapshot value for a given
6023  * management frame
6024  * @global_timestamp: global time stamp
6025  * @mgmt_pkt_ctr: management packet counter
6026  *
6027  * This API gets the snapshot value for a frame with time stamp
6028  * @global_timestamp and sequence number @mgmt_pkt_ctr.
6029  *
6030  * Return: snapshot value (struct mgmt_rx_reo_shared_snapshot)
6031  */
6032 static struct mgmt_rx_reo_shared_snapshot
mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,uint16_t mgmt_pkt_ctr)6033 mgmt_rx_reo_sim_get_snapshot_value(uint32_t global_timestamp,
6034 				   uint16_t mgmt_pkt_ctr)
6035 {
6036 	struct mgmt_rx_reo_shared_snapshot snapshot = {0};
6037 
6038 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
6039 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_POS,
6040 		     MGMT_RX_REO_SNAPSHOT_LOW_VALID_SIZE, 1);
6041 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
6042 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_POS,
6043 		     MGMT_RX_REO_SNAPSHOT_LOW_MGMT_PKT_CTR_SIZE, mgmt_pkt_ctr);
6044 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_low,
6045 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_POS,
6046 		     MGMT_RX_REO_SNAPSHOT_LOW_GLOBAL_TIMESTAMP_SIZE,
6047 		     global_timestamp);
6048 
6049 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
6050 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_POS,
6051 		     MGMT_RX_REO_SNAPSHOT_HIGH_GLOBAL_TIMESTAMP_SIZE,
6052 		     global_timestamp >> 15);
6053 	QDF_SET_BITS(snapshot.mgmt_rx_reo_snapshot_high,
6054 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_POS,
6055 		     MGMT_RX_REO_SNAPSHOT_HIGH_MGMT_PKT_CTR_REDUNDANT_SIZE,
6056 		     mgmt_pkt_ctr);
6057 
6058 	return snapshot;
6059 }
6060 
/**
 * mgmt_rx_reo_sim_frame_handler_fw() - Management frame handler at the fw layer
 * @arg: Argument (pointer to struct mgmt_rx_frame_mac_hw; this handler owns
 * it and frees it on every exit path)
 *
 * This API handles the management frame at the fw layer. This is applicable
 * for simulation alone. It emulates the MAC-HW-to-FW delay, randomly marks a
 * configured percentage of frames as FW-consumed, writes the corresponding
 * FW snapshot, and queues the frame to the host-layer handler.
 *
 * Return: none
 */
static void
mgmt_rx_reo_sim_frame_handler_fw(void *arg)
{
	struct mgmt_rx_frame_mac_hw *frame_hw =
					(struct mgmt_rx_frame_mac_hw *)arg;
	uint32_t mac_hw_to_fw_delay_us;
	bool is_consumed_by_fw;
	struct  mgmt_rx_frame_fw *frame_fw;
	int8_t link_id = -1;
	QDF_STATUS status;
	struct mgmt_rx_reo_sim_context *sim_context;
	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
	struct mgmt_rx_reo_shared_snapshot snapshot_value;
	bool ret;
	uint8_t ml_grp_id;

	/*
	 * NOTE(review): qdf_assert_always() is relied upon to stop execution
	 * here; if asserts were compiled out, the dereference below would
	 * fault on a null frame_hw — confirm assert semantics.
	 */
	if (!frame_hw) {
		mgmt_rx_reo_err("FW-%d : Pointer to HW frame struct is null",
				link_id);
		qdf_assert_always(0);
	}

	link_id = frame_hw->params.link_id;

	sim_context = frame_hw->sim_context;
	if (!sim_context) {
		mgmt_rx_reo_err("FW-%d : Mgmt rx reo simulation context null",
				link_id);
		goto error_free_mac_hw_frame;
	}

	ml_grp_id = sim_context->mlo_grp_id;

	/* Emulate a random MAC-HW-to-FW delivery latency */
	mac_hw_to_fw_delay_us = MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN +
			mgmt_rx_reo_sim_get_random_unsigned_int(
			MGMT_RX_REO_SIM_DELAY_MAC_HW_TO_FW_MIN_MAX_DELTA);
	mgmt_rx_reo_sim_sleep(mac_hw_to_fw_delay_us);

	is_consumed_by_fw = mgmt_rx_reo_sim_get_random_bool(
			    MGMT_RX_REO_SIM_PERCENTAGE_FW_CONSUMED_FRAMES);

	if (is_consumed_by_fw) {
		/**
		 * This frame should be present in pending/stale list of the
		 * master frame list. FW consumed frames need not be reordered
		 * by reorder algorithm. It is just used for book
		 * keeping purposes. Hence remove it from the master list.
		 */
		status = mgmt_rx_reo_sim_remove_frame_from_master_list(
					&sim_context->master_frame_list,
					&frame_hw->params);

		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("FW-%d : Failed to remove FW consumed frame",
					link_id);
			qdf_assert_always(0);
		}
	}

	mgmt_rx_reo_debug("FW-%d : Processing frame with ts = %u, ctr = %u, consume = %u",
			  link_id, frame_hw->params.global_timestamp,
			  frame_hw->params.mgmt_pkt_ctr, is_consumed_by_fw);

	/* Build the FW-layer frame that will be handed to the host handler */
	frame_fw = qdf_mem_malloc(sizeof(*frame_fw));
	if (!frame_fw) {
		mgmt_rx_reo_err("FW-%d : Failed to allocate FW mgmt frame",
				link_id);
		goto error_free_mac_hw_frame;
	}

	frame_fw->params = frame_hw->params;
	frame_fw->is_consumed_by_fw = is_consumed_by_fw;
	frame_fw->sim_context = frame_hw->sim_context;

	/* Pick the snapshot matching this frame's FW disposition */
	snapshot_id = is_consumed_by_fw ?
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_CONSUMED :
		      MGMT_RX_REO_SHARED_SNAPSHOT_FW_FORWARDED;

	snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
					frame_hw->params.global_timestamp,
					frame_hw->params.mgmt_pkt_ctr);

	status = mgmt_rx_reo_sim_write_snapshot(
			link_id, ml_grp_id,
			snapshot_id, snapshot_value);

	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to write snapshot %d",
				link_id, snapshot_id);
		goto error_free_fw_frame;
	}

	/* Queue the frame to the per-link host workqueue; frame_fw ownership
	 * passes to the host handler on success.
	 */
	status = qdf_create_work(NULL, &frame_fw->frame_handler_host,
				 mgmt_rx_reo_sim_frame_handler_host, frame_fw);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("FW-%d : Failed to create work", link_id);
		goto error_free_fw_frame;
	}

	ret = qdf_queue_work(
			NULL, sim_context->host_mgmt_frame_handler[link_id],
			&frame_fw->frame_handler_host);
	if (!ret) {
		mgmt_rx_reo_err("FW-%d : Work is already present on the queue",
				link_id);
		goto error_free_fw_frame;
	}

	qdf_mem_free(frame_hw);

	return;

	/* Intentional fallthrough: freeing frame_fw also requires freeing
	 * the originating frame_hw below.
	 */
error_free_fw_frame:
	qdf_mem_free(frame_fw);
error_free_mac_hw_frame:
	qdf_mem_free(frame_hw);

	mgmt_rx_reo_err("FW-%d : Exiting fw frame handler due to error",
			link_id);
}
6190 
6191 /**
6192  * mgmt_rx_reo_sim_get_link_id() - Helper API to get the link id value
6193  * from the index to the valid link list
6194  * @valid_link_list_index: Index to list of valid links
6195  *
6196  * Return: link id
6197  */
6198 static int8_t
mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)6199 mgmt_rx_reo_sim_get_link_id(uint8_t valid_link_list_index)
6200 {
6201 	struct mgmt_rx_reo_sim_context *sim_context;
6202 
6203 	if (valid_link_list_index >= MAX_MLO_LINKS) {
6204 		mgmt_rx_reo_err("Invalid index %u to valid link list",
6205 				valid_link_list_index);
6206 		return MGMT_RX_REO_INVALID_LINK;
6207 	}
6208 
6209 	sim_context = mgmt_rx_reo_sim_get_context();
6210 	if (!sim_context) {
6211 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
6212 		return MGMT_RX_REO_INVALID_LINK;
6213 	}
6214 
6215 	return sim_context->link_id_to_pdev_map.valid_link_list
6216 						[valid_link_list_index];
6217 }
6218 
6219 /**
6220  * mgmt_rx_reo_sim_receive_from_air() - Simulate management frame reception from
6221  * the air
6222  * @mac_hw: pointer to structure representing MAC HW
6223  * @num_mlo_links: number of MLO HW links
6224  * @frame: pointer to management frame parameters
6225  *
6226  * This API simulates the management frame reception from air.
6227  *
6228  * Return: QDF_STATUS
6229  */
6230 static QDF_STATUS
mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw * mac_hw,uint8_t num_mlo_links,struct mgmt_rx_frame_params * frame)6231 mgmt_rx_reo_sim_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
6232 				 uint8_t num_mlo_links,
6233 				 struct mgmt_rx_frame_params *frame)
6234 {
6235 	uint8_t valid_link_list_index;
6236 	int8_t link_id;
6237 
6238 	if (!mac_hw) {
6239 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
6240 		return QDF_STATUS_E_NULL_VALUE;
6241 	}
6242 
6243 	if (num_mlo_links == 0 || num_mlo_links > MAX_MLO_LINKS) {
6244 		mgmt_rx_reo_err("Invalid number of MLO links %u",
6245 				num_mlo_links);
6246 		return QDF_STATUS_E_INVAL;
6247 	}
6248 
6249 	if (!frame) {
6250 		mgmt_rx_reo_err("pointer to frame parameters is null");
6251 		return QDF_STATUS_E_NULL_VALUE;
6252 	}
6253 
6254 	valid_link_list_index = mgmt_rx_reo_sim_get_random_unsigned_int(
6255 							num_mlo_links);
6256 	link_id = mgmt_rx_reo_sim_get_link_id(valid_link_list_index);
6257 	qdf_assert_always(link_id >= 0);
6258 	qdf_assert_always(link_id < MAX_MLO_LINKS);
6259 
6260 	frame->global_timestamp = div_u64(ktime_get_ns(), NSEC_PER_USEC);
6261 	frame->mgmt_pkt_ctr = ++mac_hw->mgmt_pkt_ctr[link_id];
6262 	frame->link_id = link_id;
6263 
6264 	return QDF_STATUS_SUCCESS;
6265 }
6266 
6267 /**
6268  * mgmt_rx_reo_sim_undo_receive_from_air() - API to restore the state of MAC
6269  * HW in case of any Rx error.
6270  * @mac_hw: pointer to structure representing MAC HW
6271  * @frame: pointer to management frame parameters
6272  *
6273  * Return: QDF_STATUS
6274  */
6275 static QDF_STATUS
mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw * mac_hw,struct mgmt_rx_frame_params * frame)6276 mgmt_rx_reo_sim_undo_receive_from_air(struct mgmt_rx_reo_sim_mac_hw *mac_hw,
6277 				      struct mgmt_rx_frame_params *frame)
6278 {
6279 	if (!mac_hw) {
6280 		mgmt_rx_reo_err("pointer to MAC HW struct is null");
6281 		return QDF_STATUS_E_NULL_VALUE;
6282 	}
6283 
6284 	if (!frame) {
6285 		mgmt_rx_reo_err("pointer to frame parameters is null");
6286 		return QDF_STATUS_E_NULL_VALUE;
6287 	}
6288 
6289 	if (frame->link_id >= MAX_MLO_LINKS) {
6290 		mgmt_rx_reo_err("Invalid link id %u", frame->link_id);
6291 		return QDF_STATUS_E_INVAL;
6292 	}
6293 
6294 	--mac_hw->mgmt_pkt_ctr[frame->link_id];
6295 
6296 	return QDF_STATUS_SUCCESS;
6297 }
6298 
6299 /**
6300  * mgmt_rx_reo_sim_mac_hw_thread() - kthread to simulate MAC HW
6301  * @data: pointer to data input
6302  *
6303  * kthread handler to simulate MAC HW.
6304  *
6305  * Return: 0 for success, else failure
6306  */
6307 static int
mgmt_rx_reo_sim_mac_hw_thread(void * data)6308 mgmt_rx_reo_sim_mac_hw_thread(void *data)
6309 {
6310 	struct mgmt_rx_reo_sim_context *sim_context = data;
6311 	struct mgmt_rx_reo_sim_mac_hw *mac_hw;
6312 
6313 	if (!sim_context) {
6314 		mgmt_rx_reo_err("HW: Mgmt rx reo simulation context is null");
6315 		return -EINVAL;
6316 	}
6317 
6318 	mac_hw = &sim_context->mac_hw_sim.mac_hw_info;
6319 
6320 	while (!qdf_thread_should_stop()) {
6321 		uint32_t inter_frame_delay_us;
6322 		struct mgmt_rx_frame_params frame;
6323 		struct mgmt_rx_frame_mac_hw *frame_mac_hw;
6324 		int8_t link_id = -1;
6325 		QDF_STATUS status;
6326 		enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
6327 		struct mgmt_rx_reo_shared_snapshot snapshot_value;
6328 		int8_t num_mlo_links;
6329 		bool ret;
6330 		uint8_t ml_grp_id;
6331 
6332 		num_mlo_links = mgmt_rx_reo_sim_get_num_mlo_links(sim_context);
6333 		if (num_mlo_links < 0 ||
6334 		    num_mlo_links > MAX_MLO_LINKS) {
6335 			mgmt_rx_reo_err("Invalid number of MLO links %d",
6336 					num_mlo_links);
6337 			qdf_assert_always(0);
6338 		}
6339 
6340 		status = mgmt_rx_reo_sim_receive_from_air(mac_hw, num_mlo_links,
6341 							  &frame);
6342 		if (QDF_IS_STATUS_ERROR(status)) {
6343 			mgmt_rx_reo_err("Receive from the air failed");
6344 			/**
6345 			 * Frame reception failed and we are not sure about the
6346 			 * link id. Without link id there is no way to restore
6347 			 * the mac hw state. Hence assert unconditionally.
6348 			 */
6349 			qdf_assert_always(0);
6350 		}
6351 		link_id = frame.link_id;
6352 
6353 		mgmt_rx_reo_debug("HW-%d: received frame with ts = %u, ctr = %u",
6354 				  link_id, frame.global_timestamp,
6355 				  frame.mgmt_pkt_ctr);
6356 
6357 		frame_mac_hw = qdf_mem_malloc(sizeof(*frame_mac_hw));
6358 		if (!frame_mac_hw) {
6359 			mgmt_rx_reo_err("HW-%d: Failed to alloc mac hw frame",
6360 					link_id);
6361 
6362 			/* Cleanup */
6363 			status = mgmt_rx_reo_sim_undo_receive_from_air(
6364 								mac_hw, &frame);
6365 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
6366 
6367 			continue;
6368 		}
6369 
6370 		frame_mac_hw->params = frame;
6371 		frame_mac_hw->sim_context = sim_context;
6372 		ml_grp_id = sim_context->ml_grp_id;
6373 
6374 		status = mgmt_rx_reo_sim_add_frame_to_pending_list(
6375 				&sim_context->master_frame_list, &frame);
6376 		if (QDF_IS_STATUS_ERROR(status)) {
6377 			mgmt_rx_reo_err("HW-%d: Failed to add frame to list",
6378 					link_id);
6379 
6380 			/* Cleanup */
6381 			status = mgmt_rx_reo_sim_undo_receive_from_air(
6382 								mac_hw, &frame);
6383 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
6384 
6385 			qdf_mem_free(frame_mac_hw);
6386 
6387 			continue;
6388 		}
6389 
6390 		snapshot_id = MGMT_RX_REO_SHARED_SNAPSHOT_MAC_HW;
6391 		snapshot_value = mgmt_rx_reo_sim_get_snapshot_value(
6392 						frame.global_timestamp,
6393 						frame.mgmt_pkt_ctr);
6394 
6395 		status = mgmt_rx_reo_sim_write_snapshot(
6396 				link_id, ml_grp_id
6397 				snapshot_id, snapshot_value);
6398 		if (QDF_IS_STATUS_ERROR(status)) {
6399 			mgmt_rx_reo_err("HW-%d : Failed to write snapshot %d",
6400 					link_id, snapshot_id);
6401 
6402 			/* Cleanup */
6403 			status = mgmt_rx_reo_sim_remove_frame_from_pending_list(
6404 				&sim_context->master_frame_list, &frame);
6405 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
6406 
6407 			status = mgmt_rx_reo_sim_undo_receive_from_air(
6408 								mac_hw, &frame);
6409 			qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
6410 
6411 			qdf_mem_free(frame_mac_hw);
6412 
6413 			continue;
6414 		}
6415 
6416 		status = qdf_create_work(NULL, &frame_mac_hw->frame_handler_fw,
6417 					 mgmt_rx_reo_sim_frame_handler_fw,
6418 					 frame_mac_hw);
6419 		if (QDF_IS_STATUS_ERROR(status)) {
6420 			mgmt_rx_reo_err("HW-%d : Failed to create work",
6421 					link_id);
6422 			qdf_assert_always(0);
6423 		}
6424 
6425 		ret = qdf_queue_work(
6426 			NULL, sim_context->fw_mgmt_frame_handler[link_id],
6427 			&frame_mac_hw->frame_handler_fw);
6428 		if (!ret) {
6429 			mgmt_rx_reo_err("HW-%d : Work is already present in Q",
6430 					link_id);
6431 			qdf_assert_always(0);
6432 		}
6433 
6434 		inter_frame_delay_us = MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN +
6435 			mgmt_rx_reo_sim_get_random_unsigned_int(
6436 			MGMT_RX_REO_SIM_INTER_FRAME_DELAY_MIN_MAX_DELTA);
6437 
6438 		mgmt_rx_reo_sim_sleep(inter_frame_delay_us);
6439 	}
6440 
6441 	return 0;
6442 }
6443 
6444 /**
6445  * mgmt_rx_reo_sim_init_master_frame_list() - Initializes the master
6446  * management frame list
6447  * @master_frame_list: Pointer to master frame list
6448  *
6449  * This API initializes the master management frame list
6450  *
6451  * Return: QDF_STATUS
6452  */
6453 static QDF_STATUS
mgmt_rx_reo_sim_init_master_frame_list(struct mgmt_rx_reo_master_frame_list * master_frame_list)6454 mgmt_rx_reo_sim_init_master_frame_list(
6455 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
6456 {
6457 	qdf_spinlock_create(&master_frame_list->lock);
6458 
6459 	qdf_list_create(&master_frame_list->pending_list,
6460 			MGMT_RX_REO_SIM_PENDING_FRAME_LIST_MAX_SIZE);
6461 	qdf_list_create(&master_frame_list->stale_list,
6462 			MGMT_RX_REO_SIM_STALE_FRAME_LIST_MAX_SIZE);
6463 
6464 	return QDF_STATUS_SUCCESS;
6465 }
6466 
6467 /**
6468  * mgmt_rx_reo_sim_deinit_master_frame_list() - De initializes the master
6469  * management frame list
6470  * @master_frame_list: Pointer to master frame list
6471  *
6472  * This API de initializes the master management frame list
6473  *
6474  * Return: QDF_STATUS
6475  */
6476 static QDF_STATUS
mgmt_rx_reo_sim_deinit_master_frame_list(struct mgmt_rx_reo_master_frame_list * master_frame_list)6477 mgmt_rx_reo_sim_deinit_master_frame_list(
6478 		struct mgmt_rx_reo_master_frame_list *master_frame_list)
6479 {
6480 	qdf_spin_lock(&master_frame_list->lock);
6481 	qdf_list_destroy(&master_frame_list->stale_list);
6482 	qdf_list_destroy(&master_frame_list->pending_list);
6483 	qdf_spin_unlock(&master_frame_list->lock);
6484 
6485 	qdf_spinlock_destroy(&master_frame_list->lock);
6486 
6487 	return QDF_STATUS_SUCCESS;
6488 }
6489 
6490 /**
6491  * mgmt_rx_reo_sim_generate_unique_link_id() - Helper API to generate
6492  * unique link id values
6493  * @link_id_to_pdev_map: pointer to link id to pdev map
6494  * @link_id: Pointer to unique link id
6495  *
6496  * This API generates unique link id values for each pdev. This API should be
6497  * called after acquiring the spin lock protecting link id to pdev map.
6498  *
6499  * Return: QDF_STATUS
6500  */
6501 static QDF_STATUS
mgmt_rx_reo_sim_generate_unique_link_id(struct wlan_objmgr_pdev ** link_id_to_pdev_map,uint8_t * link_id)6502 mgmt_rx_reo_sim_generate_unique_link_id(
6503 		struct wlan_objmgr_pdev **link_id_to_pdev_map, uint8_t *link_id)
6504 {
6505 	uint8_t random_link_id;
6506 	uint8_t link;
6507 
6508 	if (!link_id_to_pdev_map || !link_id)
6509 		return QDF_STATUS_E_NULL_VALUE;
6510 
6511 	for (link = 0; link < MAX_MLO_LINKS; link++)
6512 		if (!link_id_to_pdev_map[link])
6513 			break;
6514 
6515 	if (link == MAX_MLO_LINKS) {
6516 		mgmt_rx_reo_err("All link ids are already allocated");
6517 		return QDF_STATUS_E_FAILURE;
6518 	}
6519 
6520 	while (1) {
6521 		random_link_id = mgmt_rx_reo_sim_get_random_unsigned_int(
6522 							MAX_MLO_LINKS);
6523 
6524 		if (!link_id_to_pdev_map[random_link_id])
6525 			break;
6526 	}
6527 
6528 	*link_id = random_link_id;
6529 
6530 	return QDF_STATUS_SUCCESS;
6531 }
6532 
6533 /**
6534  * mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map() - Builds the MLO HW link id
6535  * to pdev map
6536  * @link_id_to_pdev_map: pointer to link id to pdev map
6537  * @pdev: pointer to pdev object
6538  *
6539  * This API incrementally builds the MLO HW link id to pdev map. This API is
6540  * used only for simulation.
6541  *
6542  * Return: QDF_STATUS
6543  */
6544 static QDF_STATUS
mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(struct mgmt_rx_reo_sim_link_id_to_pdev_map * link_id_to_pdev_map,struct wlan_objmgr_pdev * pdev)6545 mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
6546 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
6547 		struct wlan_objmgr_pdev *pdev)
6548 {
6549 	uint8_t link_id;
6550 	QDF_STATUS status;
6551 
6552 	if (!link_id_to_pdev_map) {
6553 		mgmt_rx_reo_err("Link id to pdev map is null");
6554 		return QDF_STATUS_E_NULL_VALUE;
6555 	}
6556 
6557 	if (!pdev) {
6558 		mgmt_rx_reo_err("pdev is null");
6559 		return QDF_STATUS_E_NULL_VALUE;
6560 	}
6561 
6562 	qdf_spin_lock(&link_id_to_pdev_map->lock);
6563 
6564 	status = mgmt_rx_reo_sim_generate_unique_link_id(
6565 					link_id_to_pdev_map->map, &link_id);
6566 	if (QDF_IS_STATUS_ERROR(status)) {
6567 		qdf_spin_unlock(&link_id_to_pdev_map->lock);
6568 		return QDF_STATUS_E_FAILURE;
6569 	}
6570 	qdf_assert_always(link_id < MAX_MLO_LINKS);
6571 
6572 	link_id_to_pdev_map->map[link_id] = pdev;
6573 	link_id_to_pdev_map->valid_link_list
6574 			[link_id_to_pdev_map->num_mlo_links] = link_id;
6575 	link_id_to_pdev_map->num_mlo_links++;
6576 
6577 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
6578 
6579 	return QDF_STATUS_SUCCESS;
6580 }
6581 
6582 /**
6583  * mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map() - Destroys the MLO HW link
6584  * id to pdev map
6585  * @link_id_to_pdev_map: pointer to link id to pdev map
6586  * @pdev: pointer to pdev object
6587  *
6588  * This API incrementally destroys the MLO HW link id to pdev map. This API is
6589  * used only for simulation.
6590  *
6591  * Return: QDF_STATUS
6592  */
6593 static QDF_STATUS
mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(struct mgmt_rx_reo_sim_link_id_to_pdev_map * link_id_to_pdev_map,struct wlan_objmgr_pdev * pdev)6594 mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
6595 		struct mgmt_rx_reo_sim_link_id_to_pdev_map *link_id_to_pdev_map,
6596 		struct wlan_objmgr_pdev *pdev)
6597 {
6598 	uint8_t link_id;
6599 
6600 	if (!link_id_to_pdev_map) {
6601 		mgmt_rx_reo_err("Link id to pdev map is null");
6602 		return QDF_STATUS_E_NULL_VALUE;
6603 	}
6604 
6605 	if (!pdev) {
6606 		mgmt_rx_reo_err("pdev is null");
6607 		return QDF_STATUS_E_NULL_VALUE;
6608 	}
6609 
6610 	qdf_spin_lock(&link_id_to_pdev_map->lock);
6611 
6612 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
6613 		if (link_id_to_pdev_map->map[link_id] == pdev) {
6614 			link_id_to_pdev_map->map[link_id] = NULL;
6615 			qdf_spin_unlock(&link_id_to_pdev_map->lock);
6616 
6617 			return QDF_STATUS_SUCCESS;
6618 		}
6619 	}
6620 
6621 	qdf_spin_unlock(&link_id_to_pdev_map->lock);
6622 
6623 	mgmt_rx_reo_err("Pdev %pK is not found in map", pdev);
6624 
6625 	return QDF_STATUS_E_FAILURE;
6626 }
6627 
6628 QDF_STATUS
mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev * pdev)6629 mgmt_rx_reo_sim_pdev_object_create_notification(struct wlan_objmgr_pdev *pdev)
6630 {
6631 	struct mgmt_rx_reo_sim_context *sim_context;
6632 	QDF_STATUS status;
6633 
6634 	sim_context = mgmt_rx_reo_sim_get_context();
6635 	if (!sim_context) {
6636 		mgmt_rx_reo_err("Mgmt simulation context is null");
6637 		return QDF_STATUS_E_NULL_VALUE;
6638 	}
6639 
6640 	status = mgmt_rx_reo_sim_insert_into_link_id_to_pdev_map(
6641 				&sim_context->link_id_to_pdev_map, pdev);
6642 
6643 	if (QDF_IS_STATUS_ERROR(status)) {
6644 		mgmt_rx_reo_err("Failed to add pdev to the map %pK", pdev);
6645 		return status;
6646 	}
6647 
6648 	return QDF_STATUS_SUCCESS;
6649 }
6650 
6651 QDF_STATUS
mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev * pdev)6652 mgmt_rx_reo_sim_pdev_object_destroy_notification(struct wlan_objmgr_pdev *pdev)
6653 {
6654 	struct mgmt_rx_reo_sim_context *sim_context;
6655 	QDF_STATUS status;
6656 
6657 	sim_context = mgmt_rx_reo_sim_get_context();
6658 	if (!sim_context) {
6659 		mgmt_rx_reo_err("Mgmt simulation context is null");
6660 		return QDF_STATUS_E_NULL_VALUE;
6661 	}
6662 
6663 	status = mgmt_rx_reo_sim_remove_from_link_id_to_pdev_map(
6664 				&sim_context->link_id_to_pdev_map, pdev);
6665 
6666 	if (QDF_IS_STATUS_ERROR(status)) {
6667 		mgmt_rx_reo_err("Failed to remove pdev from the map");
6668 		return status;
6669 	}
6670 
6671 	return QDF_STATUS_SUCCESS;
6672 }
6673 
6674 QDF_STATUS
mgmt_rx_reo_sim_start(uint8_t ml_grp_id)6675 mgmt_rx_reo_sim_start(uint8_t ml_grp_id)
6676 {
6677 	struct mgmt_rx_reo_context *reo_context;
6678 	struct mgmt_rx_reo_sim_context *sim_context;
6679 	qdf_thread_t *mac_hw_thread;
6680 	uint8_t link_id;
6681 	uint8_t id;
6682 	QDF_STATUS status;
6683 
6684 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
6685 	if (!reo_context) {
6686 		mgmt_rx_reo_err("reo context is null");
6687 		return QDF_STATUS_E_NULL_VALUE;
6688 	}
6689 
6690 	reo_context->simulation_in_progress = true;
6691 
6692 	sim_context = &reo_context->sim_context;
6693 
6694 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
6695 		struct workqueue_struct *wq;
6696 
6697 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_host-%u", 0,
6698 					     link_id);
6699 		if (!wq) {
6700 			mgmt_rx_reo_err("Host workqueue creation failed");
6701 			status = QDF_STATUS_E_FAILURE;
6702 			goto error_destroy_fw_and_host_work_queues_till_last_link;
6703 		}
6704 		sim_context->host_mgmt_frame_handler[link_id] = wq;
6705 
6706 		wq = alloc_ordered_workqueue("mgmt_rx_reo_sim_fw-%u", 0,
6707 					     link_id);
6708 		if (!wq) {
6709 			mgmt_rx_reo_err("FW workqueue creation failed");
6710 			status = QDF_STATUS_E_FAILURE;
6711 			goto error_destroy_host_work_queue_of_last_link;
6712 		}
6713 		sim_context->fw_mgmt_frame_handler[link_id] = wq;
6714 	}
6715 
6716 	mac_hw_thread = qdf_create_thread(mgmt_rx_reo_sim_mac_hw_thread,
6717 					  sim_context, "MAC_HW_thread");
6718 	if (!mac_hw_thread) {
6719 		mgmt_rx_reo_err("MAC HW thread creation failed");
6720 		status = QDF_STATUS_E_FAILURE;
6721 		goto error_destroy_fw_and_host_work_queues_of_last_link;
6722 	}
6723 
6724 	sim_context->mac_hw_sim.mac_hw_thread = mac_hw_thread;
6725 
6726 	qdf_wake_up_process(sim_context->mac_hw_sim.mac_hw_thread);
6727 
6728 	return QDF_STATUS_SUCCESS;
6729 
6730 error_destroy_fw_and_host_work_queues_of_last_link:
6731 	drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
6732 	destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
6733 
6734 error_destroy_host_work_queue_of_last_link:
6735 	drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
6736 	destroy_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
6737 
6738 error_destroy_fw_and_host_work_queues_till_last_link:
6739 	for (id = 0; id < link_id; id++) {
6740 		drain_workqueue(sim_context->fw_mgmt_frame_handler[id]);
6741 		destroy_workqueue(sim_context->fw_mgmt_frame_handler[id]);
6742 
6743 		drain_workqueue(sim_context->host_mgmt_frame_handler[id]);
6744 		destroy_workqueue(sim_context->host_mgmt_frame_handler[id]);
6745 	}
6746 
6747 	return status;
6748 }
6749 
QDF_STATUS
mgmt_rx_reo_sim_stop(uint8_t ml_grp_id)
{
	struct mgmt_rx_reo_context *reo_context;
	struct mgmt_rx_reo_sim_context *sim_context;
	struct mgmt_rx_reo_master_frame_list *master_frame_list;
	uint8_t link_id;
	QDF_STATUS status;

	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
	if (!reo_context) {
		mgmt_rx_reo_err("reo context is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	sim_context = &reo_context->sim_context;

	/* Stop the frame generator first: the MAC HW simulation thread is
	 * the producer feeding the FW/host work queues, so it must be joined
	 * before those queues are drained and destroyed below.
	 */
	status = qdf_thread_join(sim_context->mac_hw_sim.mac_hw_thread);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to stop the thread");
		return status;
	}

	sim_context->mac_hw_sim.mac_hw_thread = NULL;

	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
		/* Wait for all the pending frames to be processed by FW */
		drain_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);
		destroy_workqueue(sim_context->fw_mgmt_frame_handler[link_id]);

		/* Wait for all the pending frames to be processed by host */
		drain_workqueue(sim_context->host_mgmt_frame_handler[link_id]);
		destroy_workqueue(
				sim_context->host_mgmt_frame_handler[link_id]);
	}

	/* Dump ingress/egress frame history to aid pass/fail analysis */
	status = mgmt_rx_reo_print_ingress_frame_info
			(MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print ingress frame debug info");
		return status;
	}

	status = mgmt_rx_reo_print_egress_frame_info
			(MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_PRINT_MAX_FRAMES);
	if (QDF_IS_STATUS_ERROR(status)) {
		mgmt_rx_reo_err("Failed to print egress frame debug info");
		return status;
	}

	/* Simulation verdict: every generated frame should have been
	 * delivered and removed from both the pending and stale lists.
	 * Anything left behind means reordering failed.
	 */
	master_frame_list = &sim_context->master_frame_list;
	if (!qdf_list_empty(&master_frame_list->pending_list) ||
	    !qdf_list_empty(&master_frame_list->stale_list)) {
		mgmt_rx_reo_err("reo sim failure: pending/stale frame list non empty");

		status = mgmt_rx_reo_list_display(&reo_context->reo_list);
		if (QDF_IS_STATUS_ERROR(status)) {
			mgmt_rx_reo_err("Failed to print reorder list");
			return status;
		}

		/* Deliberate hard assert: a sim failure must be debugged */
		qdf_assert_always(0);
	} else {
		mgmt_rx_reo_err("reo sim passed");
	}

	reo_context->simulation_in_progress = false;

	return QDF_STATUS_SUCCESS;
}
6820 
6821 /**
6822  * mgmt_rx_reo_sim_init() - Initialize management rx reorder simulation
6823  * context.
6824  * @reo_context: Pointer to reo context
6825  *
6826  * Return: QDF_STATUS of operation
6827  */
6828 static QDF_STATUS
mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context * reo_context)6829 mgmt_rx_reo_sim_init(struct mgmt_rx_reo_context *reo_context)
6830 {
6831 	QDF_STATUS status;
6832 	struct mgmt_rx_reo_sim_context *sim_context;
6833 	uint8_t link_id;
6834 
6835 	if (!reo_context) {
6836 		mgmt_rx_reo_err("reo context is null");
6837 		return QDF_STATUS_E_NULL_VALUE;
6838 	}
6839 
6840 	sim_context = &reo_context->sim_context;
6841 
6842 	qdf_mem_zero(sim_context, sizeof(*sim_context));
6843 	sim_context->mlo_grp_id = reo_context->mlo_grp_id;
6844 
6845 	status = mgmt_rx_reo_sim_init_master_frame_list(
6846 					&sim_context->master_frame_list);
6847 	if (QDF_IS_STATUS_ERROR(status)) {
6848 		mgmt_rx_reo_err("Failed to create master mgmt frame list");
6849 		return status;
6850 	}
6851 
6852 	qdf_spinlock_create(&sim_context->link_id_to_pdev_map.lock);
6853 
6854 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
6855 		sim_context->link_id_to_pdev_map.valid_link_list[link_id] =
6856 					MGMT_RX_REO_INVALID_LINK;
6857 
6858 	return QDF_STATUS_SUCCESS;
6859 }
6860 
6861 /**
6862  * mgmt_rx_reo_sim_deinit() - De initialize management rx reorder simulation
6863  * context.
6864  * @reo_context: Pointer to reo context
6865  *
6866  * Return: QDF_STATUS of operation
6867  */
6868 static QDF_STATUS
mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context * reo_context)6869 mgmt_rx_reo_sim_deinit(struct mgmt_rx_reo_context *reo_context)
6870 {
6871 	QDF_STATUS status;
6872 	struct mgmt_rx_reo_sim_context *sim_context;
6873 
6874 	if (!reo_context) {
6875 		mgmt_rx_reo_err("reo context is null");
6876 		return QDF_STATUS_E_NULL_VALUE;
6877 	}
6878 
6879 	sim_context = &reo_context->sim_context;
6880 
6881 	qdf_spinlock_destroy(&sim_context->link_id_to_pdev_map.lock);
6882 
6883 	status = mgmt_rx_reo_sim_deinit_master_frame_list(
6884 					&sim_context->master_frame_list);
6885 	if (QDF_IS_STATUS_ERROR(status)) {
6886 		mgmt_rx_reo_err("Failed to destroy master frame list");
6887 		return status;
6888 	}
6889 
6890 	return QDF_STATUS_SUCCESS;
6891 }
6892 
6893 QDF_STATUS
mgmt_rx_reo_sim_get_snapshot_address(struct wlan_objmgr_pdev * pdev,enum mgmt_rx_reo_shared_snapshot_id id,struct mgmt_rx_reo_shared_snapshot ** address)6894 mgmt_rx_reo_sim_get_snapshot_address(
6895 			struct wlan_objmgr_pdev *pdev,
6896 			enum mgmt_rx_reo_shared_snapshot_id id,
6897 			struct mgmt_rx_reo_shared_snapshot **address)
6898 {
6899 	int8_t link_id;
6900 	struct mgmt_rx_reo_sim_context *sim_context;
6901 
6902 	sim_context = mgmt_rx_reo_sim_get_context();
6903 	if (!sim_context) {
6904 		mgmt_rx_reo_err("Mgmt reo simulation context is null");
6905 		return QDF_STATUS_E_NULL_VALUE;
6906 	}
6907 
6908 	if (!pdev) {
6909 		mgmt_rx_reo_err("pdev is NULL");
6910 		return QDF_STATUS_E_NULL_VALUE;
6911 	}
6912 
6913 	if (id < 0 || id >= MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
6914 		mgmt_rx_reo_err("Invalid snapshot ID %d", id);
6915 		return QDF_STATUS_E_INVAL;
6916 	}
6917 
6918 	if (!address) {
6919 		mgmt_rx_reo_err("Pointer to snapshot address is null");
6920 		return QDF_STATUS_E_NULL_VALUE;
6921 	}
6922 
6923 	link_id = wlan_get_mlo_link_id_from_pdev(pdev);
6924 	if (link_id < 0 || link_id >= MAX_MLO_LINKS) {
6925 		mgmt_rx_reo_err("Invalid link id %d for the pdev %pK", link_id,
6926 				pdev);
6927 		return QDF_STATUS_E_INVAL;
6928 	}
6929 
6930 	*address = &sim_context->snapshot[link_id][id];
6931 
6932 	return QDF_STATUS_SUCCESS;
6933 }
6934 #endif /* WLAN_MGMT_RX_REO_SIM_SUPPORT */
6935 
6936 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
6937 /**
6938  * mgmt_rx_reo_ingress_debug_info_init() - Initialize the management rx-reorder
6939  * ingress frame debug info
6940  * @psoc: Pointer to psoc
6941  * @ingress_debug_info_init_count: Initialization count
6942  * @ingress_frame_debug_info: Ingress frame debug info object
6943  *
6944  * API to initialize the management rx-reorder ingress frame debug info.
6945  *
6946  * Return: QDF_STATUS
6947  */
6948 static QDF_STATUS
mgmt_rx_reo_ingress_debug_info_init(struct wlan_objmgr_psoc * psoc,qdf_atomic_t * ingress_debug_info_init_count,struct reo_ingress_debug_info * ingress_frame_debug_info)6949 mgmt_rx_reo_ingress_debug_info_init
6950 		(struct wlan_objmgr_psoc *psoc,
6951 		 qdf_atomic_t *ingress_debug_info_init_count,
6952 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
6953 {
6954 	if (!psoc) {
6955 		mgmt_rx_reo_err("psoc is null");
6956 		return QDF_STATUS_E_NULL_VALUE;
6957 	}
6958 
6959 	if (!ingress_frame_debug_info) {
6960 		mgmt_rx_reo_err("Ingress frame debug info is null");
6961 		return QDF_STATUS_E_NULL_VALUE;
6962 	}
6963 
6964 	/* We need to initialize only for the first invocation */
6965 	if (qdf_atomic_read(ingress_debug_info_init_count))
6966 		goto success;
6967 
6968 	ingress_frame_debug_info->frame_list_size =
6969 		wlan_mgmt_rx_reo_get_ingress_frame_debug_list_size(psoc);
6970 
6971 	if (ingress_frame_debug_info->frame_list_size) {
6972 		ingress_frame_debug_info->frame_list = qdf_mem_malloc
6973 			(ingress_frame_debug_info->frame_list_size *
6974 			 sizeof(*ingress_frame_debug_info->frame_list));
6975 
6976 		if (!ingress_frame_debug_info->frame_list) {
6977 			mgmt_rx_reo_err("Failed to allocate debug info");
6978 			return QDF_STATUS_E_NOMEM;
6979 		}
6980 	}
6981 
6982 	/* Initialize the string for storing the debug info table boarder */
6983 	qdf_mem_set(ingress_frame_debug_info->boarder,
6984 		    MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
6985 
6986 success:
6987 	qdf_atomic_inc(ingress_debug_info_init_count);
6988 	return QDF_STATUS_SUCCESS;
6989 }
6990 
6991 /**
6992  * mgmt_rx_reo_egress_debug_info_init() - Initialize the management rx-reorder
6993  * egress frame debug info
6994  * @psoc: Pointer to psoc
6995  * @egress_debug_info_init_count: Initialization count
6996  * @egress_frame_debug_info: Egress frame debug info object
6997  *
6998  * API to initialize the management rx-reorder egress frame debug info.
6999  *
7000  * Return: QDF_STATUS
7001  */
7002 static QDF_STATUS
mgmt_rx_reo_egress_debug_info_init(struct wlan_objmgr_psoc * psoc,qdf_atomic_t * egress_debug_info_init_count,struct reo_egress_debug_info * egress_frame_debug_info)7003 mgmt_rx_reo_egress_debug_info_init
7004 		(struct wlan_objmgr_psoc *psoc,
7005 		 qdf_atomic_t *egress_debug_info_init_count,
7006 		 struct reo_egress_debug_info *egress_frame_debug_info)
7007 {
7008 	if (!psoc) {
7009 		mgmt_rx_reo_err("psoc is null");
7010 		return QDF_STATUS_E_NULL_VALUE;
7011 	}
7012 
7013 	if (!egress_frame_debug_info) {
7014 		mgmt_rx_reo_err("Egress frame debug info is null");
7015 		return QDF_STATUS_E_NULL_VALUE;
7016 	}
7017 
7018 	/* We need to initialize only for the first invocation */
7019 	if (qdf_atomic_read(egress_debug_info_init_count))
7020 		goto success;
7021 
7022 	egress_frame_debug_info->frame_list_size =
7023 		wlan_mgmt_rx_reo_get_egress_frame_debug_list_size(psoc);
7024 
7025 	if (egress_frame_debug_info->frame_list_size) {
7026 		egress_frame_debug_info->frame_list = qdf_mem_malloc
7027 				(egress_frame_debug_info->frame_list_size *
7028 				 sizeof(*egress_frame_debug_info->frame_list));
7029 
7030 		if (!egress_frame_debug_info->frame_list) {
7031 			mgmt_rx_reo_err("Failed to allocate debug info");
7032 			return QDF_STATUS_E_NOMEM;
7033 		}
7034 	}
7035 
7036 	/* Initialize the string for storing the debug info table boarder */
7037 	qdf_mem_set(egress_frame_debug_info->boarder,
7038 		    MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE, '-');
7039 
7040 success:
7041 	qdf_atomic_inc(egress_debug_info_init_count);
7042 	return QDF_STATUS_SUCCESS;
7043 }
7044 
7045 /**
7046  * mgmt_rx_reo_scheduler_debug_info_init() - Initialize the management
7047  * rx-reorder scheduler debug info
7048  * @psoc: Pointer to psoc
7049  * @scheduler_debug_info_init_count: Initialization count
7050  * @scheduler_debug_info: Scheduler debug info object
7051  *
7052  * API to initialize the management rx-reorder Scheduler debug info.
7053  *
7054  * Return: QDF_STATUS
7055  */
7056 static QDF_STATUS
mgmt_rx_reo_scheduler_debug_info_init(struct wlan_objmgr_psoc * psoc,qdf_atomic_t * scheduler_debug_info_init_count,struct reo_scheduler_debug_info * scheduler_debug_info)7057 mgmt_rx_reo_scheduler_debug_info_init
7058 		(struct wlan_objmgr_psoc *psoc,
7059 		 qdf_atomic_t *scheduler_debug_info_init_count,
7060 		 struct reo_scheduler_debug_info *scheduler_debug_info)
7061 {
7062 	if (!psoc) {
7063 		mgmt_rx_reo_err("psoc is null");
7064 		return QDF_STATUS_E_NULL_VALUE;
7065 	}
7066 
7067 	if (!scheduler_debug_info) {
7068 		mgmt_rx_reo_err("scheduler debug info is null");
7069 		return QDF_STATUS_E_NULL_VALUE;
7070 	}
7071 
7072 	/* We need to initialize only for the first invocation */
7073 	if (qdf_atomic_read(scheduler_debug_info_init_count))
7074 		goto success;
7075 
7076 	scheduler_debug_info->frame_list_size =
7077 		wlan_mgmt_rx_reo_get_scheduler_debug_list_size(psoc);
7078 
7079 	if (scheduler_debug_info->frame_list_size) {
7080 		scheduler_debug_info->frame_list = qdf_mem_malloc
7081 			(scheduler_debug_info->frame_list_size *
7082 			 sizeof(*scheduler_debug_info->frame_list));
7083 
7084 		if (!scheduler_debug_info->frame_list) {
7085 			mgmt_rx_reo_err("Failed to allocate debug info");
7086 			return QDF_STATUS_E_NOMEM;
7087 		}
7088 	}
7089 
7090 success:
7091 	qdf_atomic_inc(scheduler_debug_info_init_count);
7092 	return QDF_STATUS_SUCCESS;
7093 }
7094 
7095 /**
7096  * mgmt_rx_reo_debug_info_init() - Initialize the management rx-reorder debug
7097  * info
7098  * @pdev: pointer to pdev object
7099  *
7100  * API to initialize the management rx-reorder debug info.
7101  *
7102  * Return: QDF_STATUS
7103  */
7104 static QDF_STATUS
mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev * pdev)7105 mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev *pdev)
7106 {
7107 	struct mgmt_rx_reo_context *reo_context;
7108 	QDF_STATUS status;
7109 	struct wlan_objmgr_psoc *psoc;
7110 
7111 	psoc = wlan_pdev_get_psoc(pdev);
7112 
7113 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
7114 		return QDF_STATUS_SUCCESS;
7115 
7116 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
7117 	if (!reo_context) {
7118 		mgmt_rx_reo_err("reo context is null");
7119 		return QDF_STATUS_E_NULL_VALUE;
7120 	}
7121 
7122 	status = mgmt_rx_reo_ingress_debug_info_init
7123 			(psoc, &reo_context->ingress_debug_info_init_count,
7124 			 &reo_context->ingress_frame_debug_info);
7125 	if (QDF_IS_STATUS_ERROR(status)) {
7126 		mgmt_rx_reo_err("Failed to initialize ingress debug info");
7127 		return QDF_STATUS_E_FAILURE;
7128 	}
7129 
7130 	status = mgmt_rx_reo_egress_debug_info_init
7131 			(psoc, &reo_context->egress_debug_info_init_count,
7132 			 &reo_context->egress_frame_debug_info);
7133 	if (QDF_IS_STATUS_ERROR(status)) {
7134 		mgmt_rx_reo_err("Failed to initialize egress debug info");
7135 		return QDF_STATUS_E_FAILURE;
7136 	}
7137 
7138 	status = mgmt_rx_reo_scheduler_debug_info_init
7139 			(psoc, &reo_context->scheduler_debug_info_init_count,
7140 			 &reo_context->scheduler_debug_info);
7141 	if (QDF_IS_STATUS_ERROR(status)) {
7142 		mgmt_rx_reo_err("Failed to initialize scheduler debug info");
7143 		return QDF_STATUS_E_FAILURE;
7144 	}
7145 
7146 	return QDF_STATUS_SUCCESS;
7147 }
7148 
7149 /**
7150  * mgmt_rx_reo_ingress_debug_info_deinit() - De initialize the management
7151  * rx-reorder ingress frame debug info
7152  * @psoc: Pointer to psoc
7153  * @ingress_debug_info_init_count: Initialization count
7154  * @ingress_frame_debug_info: Ingress frame debug info object
7155  *
7156  * API to de initialize the management rx-reorder ingress frame debug info.
7157  *
7158  * Return: QDF_STATUS
7159  */
7160 static QDF_STATUS
mgmt_rx_reo_ingress_debug_info_deinit(struct wlan_objmgr_psoc * psoc,qdf_atomic_t * ingress_debug_info_init_count,struct reo_ingress_debug_info * ingress_frame_debug_info)7161 mgmt_rx_reo_ingress_debug_info_deinit
7162 		(struct wlan_objmgr_psoc *psoc,
7163 		 qdf_atomic_t *ingress_debug_info_init_count,
7164 		 struct reo_ingress_debug_info *ingress_frame_debug_info)
7165 {
7166 	if (!psoc) {
7167 		mgmt_rx_reo_err("psoc is null");
7168 		return QDF_STATUS_E_NULL_VALUE;
7169 	}
7170 
7171 	if (!ingress_frame_debug_info) {
7172 		mgmt_rx_reo_err("Ingress frame debug info is null");
7173 		return QDF_STATUS_E_NULL_VALUE;
7174 	}
7175 
7176 	if (!qdf_atomic_read(ingress_debug_info_init_count)) {
7177 		mgmt_rx_reo_err("Ingress debug info ref cnt is 0");
7178 		return QDF_STATUS_E_FAILURE;
7179 	}
7180 
7181 	/* We need to de-initialize only for the last invocation */
7182 	if (qdf_atomic_dec_and_test(ingress_debug_info_init_count))
7183 		goto success;
7184 
7185 	if (ingress_frame_debug_info->frame_list) {
7186 		qdf_mem_free(ingress_frame_debug_info->frame_list);
7187 		ingress_frame_debug_info->frame_list = NULL;
7188 	}
7189 	ingress_frame_debug_info->frame_list_size = 0;
7190 
7191 	qdf_mem_zero(ingress_frame_debug_info->boarder,
7192 		     MGMT_RX_REO_INGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
7193 
7194 success:
7195 	return QDF_STATUS_SUCCESS;
7196 }
7197 
7198 /**
7199  * mgmt_rx_reo_egress_debug_info_deinit() - De initialize the management
7200  * rx-reorder egress frame debug info
7201  * @psoc: Pointer to psoc
7202  * @egress_debug_info_init_count: Initialization count
7203  * @egress_frame_debug_info: Egress frame debug info object
7204  *
7205  * API to de initialize the management rx-reorder egress frame debug info.
7206  *
7207  * Return: QDF_STATUS
7208  */
7209 static QDF_STATUS
mgmt_rx_reo_egress_debug_info_deinit(struct wlan_objmgr_psoc * psoc,qdf_atomic_t * egress_debug_info_init_count,struct reo_egress_debug_info * egress_frame_debug_info)7210 mgmt_rx_reo_egress_debug_info_deinit
7211 		(struct wlan_objmgr_psoc *psoc,
7212 		 qdf_atomic_t *egress_debug_info_init_count,
7213 		 struct reo_egress_debug_info *egress_frame_debug_info)
7214 {
7215 	if (!psoc) {
7216 		mgmt_rx_reo_err("psoc is null");
7217 		return QDF_STATUS_E_NULL_VALUE;
7218 	}
7219 
7220 	if (!egress_frame_debug_info) {
7221 		mgmt_rx_reo_err("Egress frame debug info is null");
7222 		return QDF_STATUS_E_NULL_VALUE;
7223 	}
7224 
7225 	if (!qdf_atomic_read(egress_debug_info_init_count)) {
7226 		mgmt_rx_reo_err("Egress debug info ref cnt is 0");
7227 		return QDF_STATUS_E_FAILURE;
7228 	}
7229 
7230 	/* We need to de-initialize only for the last invocation */
7231 	if (qdf_atomic_dec_and_test(egress_debug_info_init_count))
7232 		goto success;
7233 
7234 	if (egress_frame_debug_info->frame_list) {
7235 		qdf_mem_free(egress_frame_debug_info->frame_list);
7236 		egress_frame_debug_info->frame_list = NULL;
7237 	}
7238 	egress_frame_debug_info->frame_list_size = 0;
7239 
7240 	qdf_mem_zero(egress_frame_debug_info->boarder,
7241 		     MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE + 1);
7242 
7243 success:
7244 	return QDF_STATUS_SUCCESS;
7245 }
7246 
7247 /**
7248  * mgmt_rx_reo_scheduler_debug_info_deinit() - De initialize the management
7249  * rx-reorder scheduler debug info
7250  * @psoc: Pointer to psoc
7251  * @scheduler_debug_info_init_count: Initialization count
7252  * @scheduler_debug_info: Scheduler debug info object
7253  *
7254  * API to de initialize the management rx-reorder scheduler debug info.
7255  *
7256  * Return: QDF_STATUS
7257  */
7258 static QDF_STATUS
mgmt_rx_reo_scheduler_debug_info_deinit(struct wlan_objmgr_psoc * psoc,qdf_atomic_t * scheduler_debug_info_init_count,struct reo_scheduler_debug_info * scheduler_debug_info)7259 mgmt_rx_reo_scheduler_debug_info_deinit
7260 		(struct wlan_objmgr_psoc *psoc,
7261 		 qdf_atomic_t *scheduler_debug_info_init_count,
7262 		 struct reo_scheduler_debug_info *scheduler_debug_info)
7263 {
7264 	if (!psoc) {
7265 		mgmt_rx_reo_err("psoc is null");
7266 		return QDF_STATUS_E_NULL_VALUE;
7267 	}
7268 
7269 	if (!scheduler_debug_info) {
7270 		mgmt_rx_reo_err("Scheduler debug info is null");
7271 		return QDF_STATUS_E_NULL_VALUE;
7272 	}
7273 
7274 	if (!qdf_atomic_read(scheduler_debug_info_init_count)) {
7275 		mgmt_rx_reo_err("Scheduler debug info ref cnt is 0");
7276 		return QDF_STATUS_E_FAILURE;
7277 	}
7278 
7279 	/* We need to de-initialize only for the last invocation */
7280 	if (qdf_atomic_dec_and_test(scheduler_debug_info_init_count))
7281 		goto success;
7282 
7283 	if (scheduler_debug_info->frame_list) {
7284 		qdf_mem_free(scheduler_debug_info->frame_list);
7285 		scheduler_debug_info->frame_list = NULL;
7286 	}
7287 	scheduler_debug_info->frame_list_size = 0;
7288 
7289 success:
7290 	return QDF_STATUS_SUCCESS;
7291 }
7292 
7293 /**
7294  * mgmt_rx_reo_debug_info_deinit() - De initialize the management rx-reorder
7295  * debug info
7296  * @pdev: Pointer to pdev object
7297  *
7298  * API to de initialize the management rx-reorder debug info.
7299  *
7300  * Return: QDF_STATUS
7301  */
7302 static QDF_STATUS
mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev * pdev)7303 mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev *pdev)
7304 {
7305 	struct mgmt_rx_reo_context *reo_context;
7306 	QDF_STATUS status;
7307 	struct wlan_objmgr_psoc *psoc;
7308 
7309 	psoc = wlan_pdev_get_psoc(pdev);
7310 
7311 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(psoc))
7312 		return QDF_STATUS_SUCCESS;
7313 
7314 	reo_context = wlan_mgmt_rx_reo_get_ctx_from_pdev(pdev);
7315 	if (!reo_context) {
7316 		mgmt_rx_reo_err("reo context is null");
7317 		return QDF_STATUS_E_NULL_VALUE;
7318 	}
7319 
7320 	status = mgmt_rx_reo_ingress_debug_info_deinit
7321 			(psoc, &reo_context->ingress_debug_info_init_count,
7322 			 &reo_context->ingress_frame_debug_info);
7323 	if (QDF_IS_STATUS_ERROR(status)) {
7324 		mgmt_rx_reo_err("Failed to deinitialize ingress debug info");
7325 		return QDF_STATUS_E_FAILURE;
7326 	}
7327 
7328 	status = mgmt_rx_reo_egress_debug_info_deinit
7329 			(psoc, &reo_context->egress_debug_info_init_count,
7330 			 &reo_context->egress_frame_debug_info);
7331 	if (QDF_IS_STATUS_ERROR(status)) {
7332 		mgmt_rx_reo_err("Failed to deinitialize egress debug info");
7333 		return QDF_STATUS_E_FAILURE;
7334 	}
7335 
7336 	status = mgmt_rx_reo_scheduler_debug_info_deinit
7337 			(psoc, &reo_context->scheduler_debug_info_init_count,
7338 			 &reo_context->scheduler_debug_info);
7339 	if (QDF_IS_STATUS_ERROR(status)) {
7340 		mgmt_rx_reo_err("Failed to deinitialize scheduler debug info");
7341 		return QDF_STATUS_E_FAILURE;
7342 	}
7343 
7344 	return QDF_STATUS_SUCCESS;
7345 }
7346 #else
static QDF_STATUS
mgmt_rx_reo_debug_info_init(struct wlan_objmgr_pdev *pdev)
{
	/* WLAN_MGMT_RX_REO_DEBUG_SUPPORT is compiled out: there is no debug
	 * state to set up, so succeed unconditionally.
	 */
	return QDF_STATUS_SUCCESS;
}
7352 
static QDF_STATUS
mgmt_rx_reo_debug_info_deinit(struct wlan_objmgr_pdev *pdev)
{
	/* WLAN_MGMT_RX_REO_DEBUG_SUPPORT is compiled out: there is no debug
	 * state to tear down, so succeed unconditionally.
	 */
	return QDF_STATUS_SUCCESS;
}
7358 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
7359 
7360 /**
7361  * mgmt_rx_reo_flush_list() - Flush all entries in the reorder list
7362  * @reo_list: Pointer to reorder list
7363  *
7364  * API to flush all the entries of the reorder list. This API would acquire
7365  * the lock protecting the list.
7366  *
7367  * Return: QDF_STATUS
7368  */
7369 static QDF_STATUS
mgmt_rx_reo_flush_list(struct mgmt_rx_reo_list * reo_list)7370 mgmt_rx_reo_flush_list(struct mgmt_rx_reo_list *reo_list)
7371 {
7372 	struct mgmt_rx_reo_list_entry *cur_entry;
7373 	struct mgmt_rx_reo_list_entry *temp;
7374 
7375 	if (!reo_list) {
7376 		mgmt_rx_reo_err("reorder list is null");
7377 		return QDF_STATUS_E_NULL_VALUE;
7378 	}
7379 
7380 	qdf_spin_lock_bh(&reo_list->list_lock);
7381 
7382 	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
7383 		free_mgmt_rx_event_params(cur_entry->rx_params);
7384 
7385 		/**
7386 		 * Release the reference taken when the entry is inserted into
7387 		 * the reorder list.
7388 		 */
7389 		wlan_objmgr_pdev_release_ref(cur_entry->pdev,
7390 					     WLAN_MGMT_RX_REO_ID);
7391 
7392 		qdf_mem_free(cur_entry);
7393 	}
7394 
7395 	qdf_spin_unlock_bh(&reo_list->list_lock);
7396 
7397 	return QDF_STATUS_SUCCESS;
7398 }
7399 
7400 /**
7401  * mgmt_rx_reo_ingress_list_deinit() - De initialize the management rx-reorder
7402  * ingress list
7403  * @ingress_list: Pointer to ingress reorder list
7404  *
7405  * API to de initialize the management rx-reorder ingress list.
7406  *
7407  * Return: QDF_STATUS
7408  */
7409 static QDF_STATUS
mgmt_rx_reo_ingress_list_deinit(struct mgmt_rx_reo_ingress_list * ingress_list)7410 mgmt_rx_reo_ingress_list_deinit(struct mgmt_rx_reo_ingress_list *ingress_list)
7411 {
7412 	QDF_STATUS status;
7413 	struct mgmt_rx_reo_list *reo_ingress_list;
7414 
7415 	if (!ingress_list) {
7416 		mgmt_rx_reo_err("Ingress list is null");
7417 		return QDF_STATUS_E_NULL_VALUE;
7418 	}
7419 	reo_ingress_list = &ingress_list->reo_list;
7420 
7421 	qdf_timer_sync_cancel(&ingress_list->ageout_timer);
7422 	qdf_timer_free(&ingress_list->ageout_timer);
7423 
7424 	status = mgmt_rx_reo_flush_list(reo_ingress_list);
7425 	if (QDF_IS_STATUS_ERROR(status)) {
7426 		mgmt_rx_reo_err("Failed to flush the ingress list");
7427 		return status;
7428 	}
7429 	qdf_spinlock_destroy(&reo_ingress_list->list_lock);
7430 	qdf_list_destroy(&reo_ingress_list->list);
7431 
7432 	return QDF_STATUS_SUCCESS;
7433 }
7434 
7435 /**
7436  * mgmt_rx_reo_egress_list_deinit() - De initialize the management rx-reorder
7437  * egress list
7438  * @egress_list: Pointer to egress reorder list
7439  *
7440  * API to de initialize the management rx-reorder egress list.
7441  *
7442  * Return: QDF_STATUS
7443  */
7444 static QDF_STATUS
mgmt_rx_reo_egress_list_deinit(struct mgmt_rx_reo_egress_list * egress_list)7445 mgmt_rx_reo_egress_list_deinit(struct mgmt_rx_reo_egress_list *egress_list)
7446 {
7447 	QDF_STATUS status;
7448 	struct mgmt_rx_reo_list *reo_egress_list;
7449 
7450 	if (!egress_list) {
7451 		mgmt_rx_reo_err("Egress list is null");
7452 		return QDF_STATUS_E_NULL_VALUE;
7453 	}
7454 	reo_egress_list = &egress_list->reo_list;
7455 
7456 	qdf_timer_sync_cancel(&egress_list->egress_inactivity_timer);
7457 	qdf_timer_free(&egress_list->egress_inactivity_timer);
7458 
7459 	status = mgmt_rx_reo_flush_list(reo_egress_list);
7460 	if (QDF_IS_STATUS_ERROR(status)) {
7461 		mgmt_rx_reo_err("Failed to flush the egress list");
7462 		return QDF_STATUS_E_FAILURE;
7463 	}
7464 	qdf_spinlock_destroy(&reo_egress_list->list_lock);
7465 	qdf_list_destroy(&reo_egress_list->list);
7466 
7467 	return QDF_STATUS_SUCCESS;
7468 }
7469 
7470 QDF_STATUS
mgmt_rx_reo_deinit_context(uint8_t ml_grp_id)7471 mgmt_rx_reo_deinit_context(uint8_t ml_grp_id)
7472 {
7473 	QDF_STATUS status;
7474 	struct mgmt_rx_reo_context *reo_context;
7475 
7476 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7477 	if (!reo_context) {
7478 		mgmt_rx_reo_err("reo context is null");
7479 		return QDF_STATUS_E_NULL_VALUE;
7480 	}
7481 
7482 	qdf_spinlock_destroy(&reo_context->frame_release_lock);
7483 	qdf_spinlock_destroy(&reo_context->reo_algo_entry_lock);
7484 
7485 	status = mgmt_rx_reo_sim_deinit(reo_context);
7486 	if (QDF_IS_STATUS_ERROR(status)) {
7487 		mgmt_rx_reo_err("Failed to de initialize reo sim context");
7488 		qdf_mem_free(reo_context);
7489 		return QDF_STATUS_E_FAILURE;
7490 	}
7491 
7492 	status = mgmt_rx_reo_egress_list_deinit(&reo_context->egress_list);
7493 	if (QDF_IS_STATUS_ERROR(status)) {
7494 		mgmt_rx_reo_err("Failed to de-initialize Rx reo egress list");
7495 		qdf_mem_free(reo_context);
7496 		return status;
7497 	}
7498 
7499 	status = mgmt_rx_reo_ingress_list_deinit(&reo_context->ingress_list);
7500 	if (QDF_IS_STATUS_ERROR(status)) {
7501 		mgmt_rx_reo_err("Failed to de-initialize Rx reo ingress list");
7502 		qdf_mem_free(reo_context);
7503 		return status;
7504 	}
7505 
7506 	mgmt_rx_reo_set_context(ml_grp_id, NULL);
7507 	qdf_mem_free(reo_context);
7508 
7509 	return QDF_STATUS_SUCCESS;
7510 }
7511 
7512 QDF_STATUS
mgmt_rx_reo_init_context(uint8_t ml_grp_id)7513 mgmt_rx_reo_init_context(uint8_t ml_grp_id)
7514 {
7515 	QDF_STATUS status;
7516 	QDF_STATUS temp;
7517 	struct mgmt_rx_reo_context *reo_context;
7518 
7519 	reo_context = qdf_mem_malloc(sizeof(struct mgmt_rx_reo_context));
7520 	if (!reo_context) {
7521 		mgmt_rx_reo_err("Failed to allocate reo context");
7522 		return QDF_STATUS_E_NULL_VALUE;
7523 	}
7524 	reo_context->mlo_grp_id = ml_grp_id;
7525 
7526 	mgmt_rx_reo_set_context(ml_grp_id, reo_context);
7527 
7528 	status = mgmt_rx_reo_ingress_list_init(&reo_context->ingress_list);
7529 	if (QDF_IS_STATUS_ERROR(status)) {
7530 		mgmt_rx_reo_err("Failed to initialize Rx reo ingress list");
7531 		goto free_reo_context;
7532 	}
7533 
7534 	status = mgmt_rx_reo_egress_list_init(&reo_context->egress_list);
7535 	if (QDF_IS_STATUS_ERROR(status)) {
7536 		mgmt_rx_reo_err("Failed to initialize Rx reo egress list");
7537 		goto deinit_reo_ingress_list;
7538 	}
7539 
7540 	status = mgmt_rx_reo_sim_init(reo_context);
7541 	if (QDF_IS_STATUS_ERROR(status)) {
7542 		mgmt_rx_reo_err("Failed to initialize reo simulation context");
7543 		goto deinit_reo_egress_list;
7544 	}
7545 
7546 	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);
7547 	qdf_spinlock_create(&reo_context->frame_release_lock);
7548 	qdf_atomic_init(&reo_context->context_id);
7549 
7550 	return QDF_STATUS_SUCCESS;
7551 
7552 deinit_reo_egress_list:
7553 	temp = mgmt_rx_reo_egress_list_deinit(&reo_context->egress_list);
7554 	if (QDF_IS_STATUS_ERROR(temp)) {
7555 		mgmt_rx_reo_err("Failed to de-initialize Rx reo egress list");
7556 		return temp;
7557 	}
7558 deinit_reo_ingress_list:
7559 	temp = mgmt_rx_reo_ingress_list_deinit(&reo_context->ingress_list);
7560 	if (QDF_IS_STATUS_ERROR(temp)) {
7561 		mgmt_rx_reo_err("Failed to de-initialize Rx reo ingress list");
7562 		return temp;
7563 	}
7564 free_reo_context:
7565 	mgmt_rx_reo_set_context(ml_grp_id, NULL);
7566 	qdf_mem_free(reo_context);
7567 
7568 	return status;
7569 }
7570 
7571 /**
7572  * wlan_mgmt_rx_reo_initialize_snapshot_params() - Initialize a given snapshot
7573  * params object
7574  * @snapshot_params: Pointer to snapshot params object
7575  *
7576  * Return: void
7577  */
7578 static void
wlan_mgmt_rx_reo_initialize_snapshot_params(struct mgmt_rx_reo_snapshot_params * snapshot_params)7579 wlan_mgmt_rx_reo_initialize_snapshot_params(
7580 			struct mgmt_rx_reo_snapshot_params *snapshot_params)
7581 {
7582 	snapshot_params->valid = false;
7583 	snapshot_params->mgmt_pkt_ctr = 0;
7584 	snapshot_params->global_timestamp = 0;
7585 }
7586 
7587 /**
7588  * mgmt_rx_reo_initialize_snapshot_address() - Initialize management Rx reorder
7589  * snapshot addresses for a given pdev
7590  * @pdev: pointer to pdev object
7591  *
7592  * Return: QDF_STATUS
7593  */
7594 static QDF_STATUS
mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev * pdev)7595 mgmt_rx_reo_initialize_snapshot_address(struct wlan_objmgr_pdev *pdev)
7596 {
7597 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
7598 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
7599 	QDF_STATUS status;
7600 
7601 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
7602 	if (!mgmt_rx_reo_pdev_ctx) {
7603 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
7604 		return QDF_STATUS_E_NULL_VALUE;
7605 	}
7606 
7607 	snapshot_id = 0;
7608 
7609 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
7610 		struct mgmt_rx_reo_snapshot_info *snapshot_info;
7611 
7612 		snapshot_info =
7613 			&mgmt_rx_reo_pdev_ctx->host_target_shared_snapshot_info
7614 			[snapshot_id];
7615 		status = wlan_mgmt_rx_reo_get_snapshot_info
7616 					(pdev, snapshot_id, snapshot_info);
7617 		if (QDF_IS_STATUS_ERROR(status)) {
7618 			mgmt_rx_reo_err("Get snapshot info failed, id = %u",
7619 					snapshot_id);
7620 			return status;
7621 		}
7622 
7623 		snapshot_id++;
7624 	}
7625 
7626 	return QDF_STATUS_SUCCESS;
7627 }
7628 
7629 /**
7630  * mgmt_rx_reo_initialize_snapshot_value() - Initialize management Rx reorder
7631  * snapshot values for a given pdev
7632  * @pdev: pointer to pdev object
7633  *
7634  * Return: QDF_STATUS
7635  */
7636 static QDF_STATUS
mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev * pdev)7637 mgmt_rx_reo_initialize_snapshot_value(struct wlan_objmgr_pdev *pdev)
7638 {
7639 	enum mgmt_rx_reo_shared_snapshot_id snapshot_id;
7640 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
7641 
7642 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
7643 	if (!mgmt_rx_reo_pdev_ctx) {
7644 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
7645 		return QDF_STATUS_E_NULL_VALUE;
7646 	}
7647 
7648 	snapshot_id = 0;
7649 	while (snapshot_id < MGMT_RX_REO_SHARED_SNAPSHOT_MAX) {
7650 		wlan_mgmt_rx_reo_initialize_snapshot_params
7651 			(&mgmt_rx_reo_pdev_ctx->last_valid_shared_snapshot
7652 			 [snapshot_id]);
7653 		snapshot_id++;
7654 	}
7655 
7656 	/* Initialize Host snapshot params */
7657 	wlan_mgmt_rx_reo_initialize_snapshot_params
7658 				(&mgmt_rx_reo_pdev_ctx->host_snapshot);
7659 
7660 	return QDF_STATUS_SUCCESS;
7661 }
7662 
7663 /**
7664  * mgmt_rx_reo_set_initialization_complete() - Set initialization completion
7665  * for management Rx REO pdev component private object
7666  * @pdev: pointer to pdev object
7667  *
7668  * Return: QDF_STATUS
7669  */
7670 static QDF_STATUS
mgmt_rx_reo_set_initialization_complete(struct wlan_objmgr_pdev * pdev)7671 mgmt_rx_reo_set_initialization_complete(struct wlan_objmgr_pdev *pdev)
7672 {
7673 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
7674 
7675 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
7676 	if (!mgmt_rx_reo_pdev_ctx) {
7677 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
7678 		return QDF_STATUS_E_NULL_VALUE;
7679 	}
7680 
7681 	mgmt_rx_reo_pdev_ctx->init_complete = true;
7682 
7683 	return QDF_STATUS_SUCCESS;
7684 }
7685 
7686 /**
7687  * mgmt_rx_reo_clear_initialization_complete() - Clear initialization completion
7688  * for management Rx REO pdev component private object
7689  * @pdev: pointer to pdev object
7690  *
7691  * Return: QDF_STATUS
7692  */
7693 static QDF_STATUS
mgmt_rx_reo_clear_initialization_complete(struct wlan_objmgr_pdev * pdev)7694 mgmt_rx_reo_clear_initialization_complete(struct wlan_objmgr_pdev *pdev)
7695 {
7696 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx;
7697 
7698 	mgmt_rx_reo_pdev_ctx = wlan_mgmt_rx_reo_get_priv_object(pdev);
7699 	if (!mgmt_rx_reo_pdev_ctx) {
7700 		mgmt_rx_reo_err("Mgmt Rx REO priv object is null");
7701 		return QDF_STATUS_E_NULL_VALUE;
7702 	}
7703 
7704 	mgmt_rx_reo_pdev_ctx->init_complete = false;
7705 
7706 	return QDF_STATUS_SUCCESS;
7707 }
7708 
7709 /**
7710  * mgmt_rx_reo_initialize_snapshots() - Initialize management Rx reorder
7711  * snapshot related data structures for a given pdev
7712  * @pdev: pointer to pdev object
7713  *
7714  * Return: QDF_STATUS
7715  */
7716 static QDF_STATUS
mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev * pdev)7717 mgmt_rx_reo_initialize_snapshots(struct wlan_objmgr_pdev *pdev)
7718 {
7719 	QDF_STATUS status;
7720 
7721 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
7722 	if (QDF_IS_STATUS_ERROR(status)) {
7723 		mgmt_rx_reo_err("Failed to initialize snapshot value");
7724 		return status;
7725 	}
7726 
7727 	status = mgmt_rx_reo_initialize_snapshot_address(pdev);
7728 	if (QDF_IS_STATUS_ERROR(status)) {
7729 		mgmt_rx_reo_err("Failed to initialize snapshot address");
7730 		return status;
7731 	}
7732 
7733 	return QDF_STATUS_SUCCESS;
7734 }
7735 
7736 /**
7737  * mgmt_rx_reo_clear_snapshots() - Clear management Rx reorder snapshot related
7738  * data structures for a given pdev
7739  * @pdev: pointer to pdev object
7740  *
7741  * Return: QDF_STATUS
7742  */
7743 static QDF_STATUS
mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev * pdev)7744 mgmt_rx_reo_clear_snapshots(struct wlan_objmgr_pdev *pdev)
7745 {
7746 	QDF_STATUS status;
7747 
7748 	status = mgmt_rx_reo_initialize_snapshot_value(pdev);
7749 	if (QDF_IS_STATUS_ERROR(status)) {
7750 		mgmt_rx_reo_err("Failed to initialize snapshot value");
7751 		return status;
7752 	}
7753 
7754 	return QDF_STATUS_SUCCESS;
7755 }
7756 
7757 QDF_STATUS
mgmt_rx_reo_pdev_attach(struct wlan_objmgr_pdev * pdev)7758 mgmt_rx_reo_pdev_attach(struct wlan_objmgr_pdev *pdev)
7759 {
7760 	QDF_STATUS status;
7761 
7762 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
7763 		return QDF_STATUS_SUCCESS;
7764 
7765 	status = mgmt_rx_reo_initialize_snapshots(pdev);
7766 	if (QDF_IS_STATUS_ERROR(status)) {
7767 		mgmt_rx_reo_err("Failed to initialize mgmt Rx REO snapshots");
7768 		return status;
7769 	}
7770 
7771 	status = mgmt_rx_reo_set_initialization_complete(pdev);
7772 	if (QDF_IS_STATUS_ERROR(status)) {
7773 		mgmt_rx_reo_err("Failed to set initialization complete");
7774 		return status;
7775 	}
7776 
7777 	return QDF_STATUS_SUCCESS;
7778 }
7779 
7780 QDF_STATUS
mgmt_rx_reo_psoc_attach(struct wlan_objmgr_psoc * psoc)7781 mgmt_rx_reo_psoc_attach(struct wlan_objmgr_psoc *psoc)
7782 {
7783 	return QDF_STATUS_SUCCESS;
7784 }
7785 
7786 QDF_STATUS
mgmt_rx_reo_pdev_detach(struct wlan_objmgr_pdev * pdev)7787 mgmt_rx_reo_pdev_detach(struct wlan_objmgr_pdev *pdev)
7788 {
7789 	QDF_STATUS status;
7790 
7791 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
7792 		return QDF_STATUS_SUCCESS;
7793 
7794 	status = mgmt_rx_reo_clear_initialization_complete(pdev);
7795 	if (QDF_IS_STATUS_ERROR(status)) {
7796 		mgmt_rx_reo_err("Failed to clear initialization complete");
7797 		return status;
7798 	}
7799 
7800 	status = mgmt_rx_reo_clear_snapshots(pdev);
7801 	if (QDF_IS_STATUS_ERROR(status)) {
7802 		mgmt_rx_reo_err("Failed to clear mgmt Rx REO snapshots");
7803 		return status;
7804 	}
7805 
7806 	return QDF_STATUS_SUCCESS;
7807 }
7808 
7809 QDF_STATUS
mgmt_rx_reo_psoc_detach(struct wlan_objmgr_psoc * psoc)7810 mgmt_rx_reo_psoc_detach(struct wlan_objmgr_psoc *psoc)
7811 {
7812 	return QDF_STATUS_SUCCESS;
7813 }
7814 
7815 QDF_STATUS
mgmt_rx_reo_pdev_obj_create_notification(struct wlan_objmgr_pdev * pdev,struct mgmt_txrx_priv_pdev_context * mgmt_txrx_pdev_ctx)7816 mgmt_rx_reo_pdev_obj_create_notification(
7817 	struct wlan_objmgr_pdev *pdev,
7818 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
7819 {
7820 	QDF_STATUS status;
7821 	struct mgmt_rx_reo_pdev_info *mgmt_rx_reo_pdev_ctx = NULL;
7822 
7823 	if (!pdev) {
7824 		mgmt_rx_reo_err("pdev is null");
7825 		status = QDF_STATUS_E_NULL_VALUE;
7826 		goto failure;
7827 	}
7828 
7829 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev)) {
7830 		status = QDF_STATUS_SUCCESS;
7831 		goto failure;
7832 	}
7833 
7834 	status = mgmt_rx_reo_sim_pdev_object_create_notification(pdev);
7835 	if (QDF_IS_STATUS_ERROR(status)) {
7836 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
7837 		goto failure;
7838 	}
7839 
7840 	mgmt_rx_reo_pdev_ctx = qdf_mem_malloc(sizeof(*mgmt_rx_reo_pdev_ctx));
7841 	if (!mgmt_rx_reo_pdev_ctx) {
7842 		mgmt_rx_reo_err("Allocation failure for REO pdev context");
7843 		status = QDF_STATUS_E_NOMEM;
7844 		goto failure;
7845 	}
7846 
7847 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = mgmt_rx_reo_pdev_ctx;
7848 
7849 	status = mgmt_rx_reo_debug_info_init(pdev);
7850 	if (QDF_IS_STATUS_ERROR(status)) {
7851 		mgmt_rx_reo_err("Failed to initialize debug info");
7852 		status = QDF_STATUS_E_NOMEM;
7853 		goto failure;
7854 	}
7855 
7856 	return QDF_STATUS_SUCCESS;
7857 
7858 failure:
7859 	if (mgmt_rx_reo_pdev_ctx)
7860 		qdf_mem_free(mgmt_rx_reo_pdev_ctx);
7861 
7862 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
7863 
7864 	return status;
7865 }
7866 
7867 QDF_STATUS
mgmt_rx_reo_pdev_obj_destroy_notification(struct wlan_objmgr_pdev * pdev,struct mgmt_txrx_priv_pdev_context * mgmt_txrx_pdev_ctx)7868 mgmt_rx_reo_pdev_obj_destroy_notification(
7869 	struct wlan_objmgr_pdev *pdev,
7870 	struct mgmt_txrx_priv_pdev_context *mgmt_txrx_pdev_ctx)
7871 {
7872 	QDF_STATUS status;
7873 
7874 	if (!wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(pdev))
7875 		return QDF_STATUS_SUCCESS;
7876 
7877 	status = mgmt_rx_reo_debug_info_deinit(pdev);
7878 	if (QDF_IS_STATUS_ERROR(status)) {
7879 		mgmt_rx_reo_err("Failed to de-initialize debug info");
7880 		return status;
7881 	}
7882 
7883 	qdf_mem_free(mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx);
7884 	mgmt_txrx_pdev_ctx->mgmt_rx_reo_pdev_ctx = NULL;
7885 
7886 	status = mgmt_rx_reo_sim_pdev_object_destroy_notification(pdev);
7887 	if (QDF_IS_STATUS_ERROR(status)) {
7888 		mgmt_rx_reo_err("Failed to handle pdev create for reo sim");
7889 		return status;
7890 	}
7891 
7892 	return QDF_STATUS_SUCCESS;
7893 }
7894 
7895 QDF_STATUS
mgmt_rx_reo_psoc_obj_create_notification(struct wlan_objmgr_psoc * psoc)7896 mgmt_rx_reo_psoc_obj_create_notification(struct wlan_objmgr_psoc *psoc)
7897 {
7898 	return QDF_STATUS_SUCCESS;
7899 }
7900 
7901 QDF_STATUS
mgmt_rx_reo_psoc_obj_destroy_notification(struct wlan_objmgr_psoc * psoc)7902 mgmt_rx_reo_psoc_obj_destroy_notification(struct wlan_objmgr_psoc *psoc)
7903 {
7904 	return QDF_STATUS_SUCCESS;
7905 }
7906 
7907 bool
mgmt_rx_reo_is_simulation_in_progress(uint8_t ml_grp_id)7908 mgmt_rx_reo_is_simulation_in_progress(uint8_t ml_grp_id)
7909 {
7910 	struct mgmt_rx_reo_context *reo_context;
7911 
7912 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7913 	if (!reo_context) {
7914 		mgmt_rx_reo_err("reo context is null");
7915 		return false;
7916 	}
7917 
7918 	return reo_context->simulation_in_progress;
7919 }
7920 
7921 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
7922 QDF_STATUS
mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)7923 mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
7924 {
7925 	struct mgmt_rx_reo_context *reo_context;
7926 	QDF_STATUS status;
7927 
7928 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7929 	if (!reo_context) {
7930 		mgmt_rx_reo_err("reo context is null");
7931 		return QDF_STATUS_E_NULL_VALUE;
7932 	}
7933 
7934 	status = mgmt_rx_reo_debug_print_ingress_frame_stats(reo_context);
7935 	if (QDF_IS_STATUS_ERROR(status)) {
7936 		mgmt_rx_reo_err("Failed to print ingress frame stats");
7937 		return status;
7938 	}
7939 
7940 	return QDF_STATUS_SUCCESS;
7941 }
7942 
7943 QDF_STATUS
mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id,uint16_t num_frames)7944 mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
7945 {
7946 	struct mgmt_rx_reo_context *reo_context;
7947 	QDF_STATUS status;
7948 
7949 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7950 	if (!reo_context) {
7951 		mgmt_rx_reo_err("reo context is null");
7952 		return QDF_STATUS_E_NULL_VALUE;
7953 	}
7954 
7955 	status = mgmt_rx_reo_debug_print_ingress_frame_info(reo_context,
7956 							    num_frames);
7957 	if (QDF_IS_STATUS_ERROR(status)) {
7958 		mgmt_rx_reo_err("Failed to print ingress frame info");
7959 		return status;
7960 	}
7961 
7962 	return QDF_STATUS_SUCCESS;
7963 }
7964 
7965 QDF_STATUS
mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)7966 mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
7967 {
7968 	struct mgmt_rx_reo_context *reo_context;
7969 	QDF_STATUS status;
7970 
7971 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7972 	if (!reo_context) {
7973 		mgmt_rx_reo_err("reo context is null");
7974 		return QDF_STATUS_E_NULL_VALUE;
7975 	}
7976 
7977 	status = mgmt_rx_reo_debug_print_egress_frame_stats(reo_context);
7978 	if (QDF_IS_STATUS_ERROR(status)) {
7979 		mgmt_rx_reo_err("Failed to print egress frame stats");
7980 		return status;
7981 	}
7982 
7983 	return QDF_STATUS_SUCCESS;
7984 }
7985 
7986 QDF_STATUS
mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id,uint16_t num_frames)7987 mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
7988 {
7989 	struct mgmt_rx_reo_context *reo_context;
7990 	QDF_STATUS status;
7991 
7992 	reo_context = mgmt_rx_reo_get_context(ml_grp_id);
7993 	if (!reo_context) {
7994 		mgmt_rx_reo_err("reo context is null");
7995 		return QDF_STATUS_E_NULL_VALUE;
7996 	}
7997 
7998 	status = mgmt_rx_reo_debug_print_egress_frame_info(reo_context,
7999 							   num_frames);
8000 	if (QDF_IS_STATUS_ERROR(status)) {
8001 		mgmt_rx_reo_err("Failed to print egress frame info");
8002 		return status;
8003 	}
8004 
8005 	return QDF_STATUS_SUCCESS;
8006 }
8007 #else
8008 QDF_STATUS
mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)8009 mgmt_rx_reo_print_ingress_frame_stats(uint8_t ml_grp_id)
8010 {
8011 	return QDF_STATUS_SUCCESS;
8012 }
8013 
8014 QDF_STATUS
mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id,uint16_t num_frames)8015 mgmt_rx_reo_print_ingress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
8016 {
8017 	return QDF_STATUS_SUCCESS;
8018 }
8019 
8020 QDF_STATUS
mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)8021 mgmt_rx_reo_print_egress_frame_stats(uint8_t ml_grp_id)
8022 {
8023 	return QDF_STATUS_SUCCESS;
8024 }
8025 
8026 QDF_STATUS
mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id,uint16_t num_frames)8027 mgmt_rx_reo_print_egress_frame_info(uint8_t ml_grp_id, uint16_t num_frames)
8028 {
8029 	return QDF_STATUS_SUCCESS;
8030 }
8031 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
8032