xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/monitor/dp_rx_mon.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_peer.h"
24 #include "hal_rx.h"
25 #include "hal_api.h"
26 #include "qdf_trace.h"
27 #include "qdf_nbuf.h"
28 #include "hal_api_mon.h"
29 #include "dp_internal.h"
30 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
31 #include "dp_htt.h"
32 #include "dp_mon.h"
33 #include "dp_rx_mon.h"
34 
35 #include "htt.h"
36 #ifdef FEATURE_PERPKT_INFO
37 #include "dp_ratetable.h"
38 #endif
39 
40 #ifndef IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK
41 #define IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK 0xe0
42 #endif
43 
44 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
45 void
46 dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
47 			     struct hal_rx_ppdu_info *ppdu_info,
48 			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
49 {
50 	struct dp_peer *peer;
51 	struct dp_soc *soc = pdev->soc;
52 	struct mon_rx_user_status *rx_user_status;
53 	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
54 	uint32_t num_users;
55 	int user_id;
56 	uint16_t sw_peer_id;
57 
58 	num_users = ppdu_info->com_info.num_users;
59 	for (user_id = 0; user_id < num_users; user_id++) {
60 		if (user_id >= OFDMA_NUM_USERS)
61 			return;
62 
63 		rx_user_status =  &ppdu_info->rx_user_status[user_id];
64 		rx_stats_peruser = &cdp_rx_ppdu->user[user_id];
65 		sw_peer_id = rx_user_status->sw_peer_id;
66 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
67 					     DP_MOD_ID_RX_PPDU_STATS);
68 		if (!peer) {
69 			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
70 			continue;
71 		}
72 
73 		qdf_mem_copy(rx_stats_peruser->mac_addr,
74 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
75 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
76 	}
77 }
78 
79 void
80 dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
81 				 struct hal_rx_ppdu_info *ppdu_info,
82 				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
83 {
84 	struct dp_peer *peer;
85 	struct dp_soc *soc = pdev->soc;
86 	int chain;
87 	uint16_t sw_peer_id;
88 	struct mon_rx_user_status *rx_user_status;
89 	uint32_t num_users = ppdu_info->com_info.num_users;
90 
91 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
92 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
93 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
94 
95 	for (chain = 0; chain < MAX_CHAIN; chain++)
96 		cdp_rx_ppdu->per_chain_rssi[chain] =
97 			ppdu_info->rx_status.rssi[chain];
98 
99 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
100 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
101 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
102 
103 	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
104 	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
105 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
106 	else
107 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
108 
109 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
110 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
111 	} else if (ppdu_info->rx_status.preamble_type ==
112 			HAL_RX_PKT_TYPE_11AX) {
113 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
114 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
115 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
116 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
117 	}
118 
119 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
120 	dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu);
121 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
122 	sw_peer_id = rx_user_status->sw_peer_id;
123 	cdp_rx_ppdu->num_users = num_users;
124 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id, DP_MOD_ID_RX_PPDU_STATS);
125 	if (!peer) {
126 		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
127 		return;
128 	}
129 
130 	cdp_rx_ppdu->peer_id = peer->peer_id;
131 	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
132 
133 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
134 }
135 
136 bool
137 dp_cfr_rcc_mode_status(struct dp_pdev *pdev)
138 {
139 	return pdev->cfr_rcc_mode;
140 }
141 
142 void
143 dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
144 			    struct hal_rx_ppdu_info *ppdu_info,
145 			    struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
146 {
147 	struct cdp_rx_ppdu_cfr_info *cfr_info;
148 
149 	if (!qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
150 		return;
151 
152 	cfr_info = &cdp_rx_ppdu->cfr_info;
153 
154 	cfr_info->bb_captured_channel
155 		= ppdu_info->cfr_info.bb_captured_channel;
156 	cfr_info->bb_captured_timeout
157 		= ppdu_info->cfr_info.bb_captured_timeout;
158 	cfr_info->bb_captured_reason
159 		= ppdu_info->cfr_info.bb_captured_reason;
160 	cfr_info->rx_location_info_valid
161 		= ppdu_info->cfr_info.rx_location_info_valid;
162 	cfr_info->chan_capture_status
163 		= ppdu_info->cfr_info.chan_capture_status;
164 	cfr_info->rtt_che_buffer_pointer_high8
165 		= ppdu_info->cfr_info.rtt_che_buffer_pointer_high8;
166 	cfr_info->rtt_che_buffer_pointer_low32
167 		= ppdu_info->cfr_info.rtt_che_buffer_pointer_low32;
168 	cfr_info->rtt_cfo_measurement
169 		= (int16_t)ppdu_info->cfr_info.rtt_cfo_measurement;
170 	cfr_info->agc_gain_info0
171 		= ppdu_info->cfr_info.agc_gain_info0;
172 	cfr_info->agc_gain_info1
173 		= ppdu_info->cfr_info.agc_gain_info1;
174 	cfr_info->agc_gain_info2
175 		= ppdu_info->cfr_info.agc_gain_info2;
176 	cfr_info->agc_gain_info3
177 		= ppdu_info->cfr_info.agc_gain_info3;
178 	cfr_info->rx_start_ts
179 		= ppdu_info->cfr_info.rx_start_ts;
180 	cfr_info->mcs_rate
181 		= ppdu_info->cfr_info.mcs_rate;
182 	cfr_info->gi_type
183 		= ppdu_info->cfr_info.gi_type;
184 }
185 
186 void
187 dp_update_cfr_dbg_stats(struct dp_pdev *pdev,
188 			struct hal_rx_ppdu_info *ppdu_info)
189 {
190 	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;
191 
192 	DP_STATS_INC(pdev,
193 		     rcc.chan_capture_status[cfr->chan_capture_status], 1);
194 	if (cfr->rx_location_info_valid) {
195 		DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1);
196 		if (cfr->bb_captured_channel) {
197 			DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1);
198 			DP_STATS_INC(pdev,
199 				     rcc.reason_cnt[cfr->bb_captured_reason],
200 				     1);
201 		} else if (cfr->bb_captured_timeout) {
202 			DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1);
203 			DP_STATS_INC(pdev,
204 				     rcc.reason_cnt[cfr->bb_captured_reason],
205 				     1);
206 		}
207 	}
208 }
209 
210 void
211 dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev,
212 		 struct hal_rx_ppdu_info *ppdu_info)
213 {
214 	qdf_nbuf_t ppdu_nbuf;
215 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
216 
217 	dp_update_cfr_dbg_stats(pdev, ppdu_info);
218 	if (!ppdu_info->cfr_info.bb_captured_channel)
219 		return;
220 
221 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
222 				   sizeof(struct cdp_rx_indication_ppdu),
223 				   0,
224 				   0,
225 				   FALSE);
226 	if (ppdu_nbuf) {
227 		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
228 
229 		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
230 		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
231 		qdf_nbuf_put_tail(ppdu_nbuf,
232 				  sizeof(struct cdp_rx_indication_ppdu));
233 		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
234 				     ppdu_nbuf, HTT_INVALID_PEER,
235 				     WDI_NO_VAL, pdev->pdev_id);
236 	}
237 }
238 
239 void
240 dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
241 				 struct hal_rx_ppdu_info *ppdu_info,
242 				 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
243 {
244 	if (!dp_cfr_rcc_mode_status(pdev))
245 		return;
246 
247 	if (ppdu_info->cfr_info.bb_captured_channel)
248 		dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
249 }
250 
251 /**
252  * dp_bb_captured_chan_status() - Get the bb_captured_channel status
253  * @pdev: pdev ctx
254  * @ppdu_info: structure for rx ppdu ring
255  *
256  * Return: QDF_STATUS_SUCCESS if the channel was captured, else QDF_STATUS_E_FAILURE
257  */
258 static inline QDF_STATUS
259 dp_bb_captured_chan_status(struct dp_pdev *pdev,
260 			   struct hal_rx_ppdu_info *ppdu_info)
261 {
262 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
263 	struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;
264 
265 	if (dp_cfr_rcc_mode_status(pdev)) {
266 		if (cfr->bb_captured_channel)
267 			status = QDF_STATUS_SUCCESS;
268 	}
269 
270 	return status;
271 }
272 #else
273 static inline QDF_STATUS
274 dp_bb_captured_chan_status(struct dp_pdev *pdev,
275 			   struct hal_rx_ppdu_info *ppdu_info)
276 {
277 	return QDF_STATUS_E_NOSUPPORT;
278 }
279 #endif /* WLAN_CFR_ENABLE */
280 
281 #ifdef QCA_ENHANCED_STATS_SUPPORT
282 #ifdef QCA_RSSI_DB2DBM
283 /**
284  * dp_rx_mon_rf_index_conv() - convert the BB chain index to the RF chain
285  *			index used in the rssi_chain[chain][bw] array
286  *
287  * @chain: BB chain index
288  * @mon_pdev: pdev structure
289  *
290  * Return: return RF chain index
291  *
292  * Computation:
293  *  The low 3 bytes (24 bits) of xbar_config encode the RF to BB mapping,
294  *  3 bits per chain. Sample xbar_config:
295  *
296  * If xbar_config is 0x688FAC (hex):
297  *     RF chains 0-3 are connected to BB chains 4-7
298  *     RF chains 4-7 are connected to BB chains 0-3
299  *     here,
300  *     bits 0 to 2 = 4, maps BB chain 4 for RF chain 0
301  *     bits 3 to 5 = 5, maps BB chain 5 for RF chain 1
302  *     bits 6 to 8 = 6, maps BB chain 6 for RF chain 2
303  *     bits 9 to 11 = 7, maps BB chain 7 for RF chain 3
304  *     bits 12 to 14 = 0, maps BB chain 0 for RF chain 4
305  *     bits 15 to 17 = 1, maps BB chain 1 for RF chain 5
306  *     bits 18 to 20 = 2, maps BB chain 2 for RF chain 6
307  *     bits 21 to 23 = 3, maps BB chain 3 for RF chain 7
308  */
309 static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
310 				       struct dp_mon_pdev *mon_pdev)
311 {
312 	uint32_t xbar_config = mon_pdev->rssi_offsets.xbar_config;
313 
314 	if (mon_pdev->rssi_dbm_conv_support && xbar_config)
315 		return ((xbar_config >> (3 * chain)) & 0x07);
316 	return chain;
317 }
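/*
 * Worked example (illustrative; reuses the sample value from the comment
 * above, not a real board config): with xbar_config = 0x688FAC and
 * chain = 2,
 *
 *   (0x688FAC >> (3 * 2)) & 0x07 = (0x688FAC >> 6) & 0x07 = 6
 *
 * so index 2 converts to index 6, matching the "BB chain 6 for RF
 * chain 2" row of the sample mapping. When rssi_dbm_conv_support is
 * disabled or xbar_config is 0, the index is returned unchanged.
 */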
318 #else
319 static uint8_t dp_rx_mon_rf_index_conv(uint8_t chain,
320 				       struct dp_mon_pdev *mon_pdev)
321 {
322 	return chain;
323 }
324 #endif
325 void
326 dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
327 			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu,
328 			     struct dp_pdev *pdev)
329 {
330 	uint8_t chain, bw;
331 	uint8_t rssi;
332 	uint8_t chain_rf;
333 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
334 
335 	for (chain = 0; chain < SS_COUNT; chain++) {
336 		for (bw = 0; bw < MAX_BW; bw++) {
337 			chain_rf = dp_rx_mon_rf_index_conv(chain, mon_pdev);
338 			rssi = ppdu_info->rx_status.rssi_chain[chain_rf][bw];
339 			if (rssi != DP_RSSI_INVAL)
340 				cdp_rx_ppdu->rssi_chain[chain_rf][bw] = rssi;
341 			else
342 				cdp_rx_ppdu->rssi_chain[chain_rf][bw] = 0;
343 		}
344 	}
345 }
346 
347 void
348 dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
349 			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
350 {
351 	uint16_t pilot_evm;
352 	uint16_t nss_count;
353 	uint16_t pilot_count;
354 
355 	nss_count = ppdu_info->evm_info.nss_count;
356 	pilot_count = ppdu_info->evm_info.pilot_count;
357 
358 	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
359 		qdf_debug("pilot evm count is more than expected");
360 		return;
361 	}
362 	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
363 	cdp_rx_ppdu->evm_info.nss_count = nss_count;
364 
365 	/* Populate EVM for all pilots: nss_count * pilot_count entries */
366 	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
367 		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
368 			ppdu_info->evm_info.pilot_evm[pilot_evm];
369 	}
370 }
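/*
 * Worked example (illustrative, assumed values): with nss_count = 2 and
 * pilot_count = 4, the loop above copies 2 * 4 = 8 EVM words; assuming a
 * stream-major layout, pilot_evm[0..3] belong to the first spatial stream
 * and pilot_evm[4..7] to the second. Had nss_count * pilot_count exceeded
 * DP_RX_MAX_SU_EVM_COUNT, the function would have returned before copying
 * anything.
 */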
371 
372 /**
373  * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
374  * @pdev: pdev ctx
375  * @rx_user_status: mon rx user status
376  *
377  * Return: true if the received frame is a data frame, else false
378  */
379 static inline bool
380 dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
381 		     struct mon_rx_user_status *rx_user_status)
382 {
383 	uint32_t ru_size;
384 	bool is_data;
385 
386 	ru_size = rx_user_status->ofdma_ru_size;
387 
388 	if (dp_is_subtype_data(rx_user_status->frame_control)) {
389 		DP_STATS_INC(pdev,
390 			     ul_ofdma.data_rx_ru_size[ru_size], 1);
391 		is_data = true;
392 	} else {
393 		DP_STATS_INC(pdev,
394 			     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
395 		is_data = false;
396 	}
397 
398 	return is_data;
399 }
400 
401 /**
402  * dp_rx_populate_cdp_indication_ppdu_user() - Populate per user cdp indication
403  * @pdev: pdev ctx
404  * @ppdu_info: ppdu info structure from ppdu ring
405  * @cdp_rx_ppdu: Rx PPDU indication structure
406  *
407  * Return: none
408  */
409 static void
410 dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
411 					struct hal_rx_ppdu_info *ppdu_info,
412 					struct cdp_rx_indication_ppdu
413 					*cdp_rx_ppdu)
414 {
415 	struct dp_peer *peer;
416 	struct dp_soc *soc = pdev->soc;
417 	int i;
418 	struct mon_rx_user_status *rx_user_status;
419 	struct mon_rx_user_info *rx_user_info;
420 	struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
421 	int ru_size;
422 	bool is_data = false;
423 	uint32_t num_users;
424 	struct dp_mon_ops *mon_ops;
425 	uint16_t sw_peer_id;
426 
427 	num_users = ppdu_info->com_info.num_users;
428 	for (i = 0; i < num_users; i++) {
429 		if (i >= OFDMA_NUM_USERS)
430 			return;
431 
432 		rx_user_status =  &ppdu_info->rx_user_status[i];
433 		rx_user_info = &ppdu_info->rx_user_info[i];
434 		rx_stats_peruser = &cdp_rx_ppdu->user[i];
435 
436 		sw_peer_id = rx_user_status->sw_peer_id;
437 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
438 					     DP_MOD_ID_RX_PPDU_STATS);
439 		if (qdf_unlikely(!peer)) {
440 			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
441 			continue;
442 		}
443 		rx_stats_peruser->is_bss_peer = peer->bss_peer;
444 
445 		rx_stats_peruser->first_data_seq_ctrl =
446 			rx_user_status->first_data_seq_ctrl;
447 
448 		rx_stats_peruser->frame_control_info_valid =
449 			rx_user_status->frame_control_info_valid;
450 		rx_stats_peruser->frame_control =
451 			rx_user_status->frame_control;
452 
453 		rx_stats_peruser->qos_control_info_valid =
454 			rx_user_info->qos_control_info_valid;
455 		rx_stats_peruser->qos_control =
456 			rx_user_info->qos_control;
457 		rx_stats_peruser->tcp_msdu_count =
458 			rx_user_status->tcp_msdu_count;
459 		rx_stats_peruser->udp_msdu_count =
460 			rx_user_status->udp_msdu_count;
461 		rx_stats_peruser->other_msdu_count =
462 			rx_user_status->other_msdu_count;
463 
464 		rx_stats_peruser->num_msdu =
465 			rx_stats_peruser->tcp_msdu_count +
466 			rx_stats_peruser->udp_msdu_count +
467 			rx_stats_peruser->other_msdu_count;
468 
469 		rx_stats_peruser->preamble_type =
470 				cdp_rx_ppdu->u.preamble;
471 		rx_stats_peruser->mpdu_cnt_fcs_ok =
472 			rx_user_status->mpdu_cnt_fcs_ok;
473 		rx_stats_peruser->mpdu_cnt_fcs_err =
474 			rx_user_status->mpdu_cnt_fcs_err;
475 		qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
476 			     &rx_user_status->mpdu_fcs_ok_bitmap,
477 			     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
478 			     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
479 		rx_stats_peruser->mpdu_ok_byte_count =
480 			rx_user_status->mpdu_ok_byte_count;
481 		rx_stats_peruser->mpdu_err_byte_count =
482 			rx_user_status->mpdu_err_byte_count;
483 
484 		cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
485 		cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu;
486 		rx_stats_peruser->retries =
487 			CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
488 			rx_stats_peruser->mpdu_cnt_fcs_ok : 0;
489 		cdp_rx_ppdu->retries += rx_stats_peruser->retries;
490 
491 		if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
492 			rx_stats_peruser->is_ampdu = 1;
493 		else
494 			rx_stats_peruser->is_ampdu = 0;
495 
496 		rx_stats_peruser->tid = ppdu_info->rx_status.tid;
497 
498 		qdf_mem_copy(rx_stats_peruser->mac_addr,
499 			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
500 		rx_stats_peruser->peer_id = peer->peer_id;
501 		cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
502 		rx_stats_peruser->vdev_id = peer->vdev->vdev_id;
503 		rx_stats_peruser->mu_ul_info_valid = 0;
504 
505 		mon_ops = dp_mon_ops_get(soc);
506 		if (mon_ops && mon_ops->mon_rx_populate_ppdu_usr_info)
507 			mon_ops->mon_rx_populate_ppdu_usr_info(rx_user_status,
508 							       rx_stats_peruser);
509 
510 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
511 		if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
512 		    cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
513 			if (rx_user_status->mu_ul_info_valid) {
514 				rx_stats_peruser->nss = rx_user_status->nss;
515 				cdp_rx_ppdu->usr_nss_sum += rx_stats_peruser->nss;
516 				rx_stats_peruser->mcs = rx_user_status->mcs;
517 				rx_stats_peruser->mu_ul_info_valid =
518 					rx_user_status->mu_ul_info_valid;
519 				rx_stats_peruser->ofdma_ru_start_index =
520 					rx_user_status->ofdma_ru_start_index;
521 				rx_stats_peruser->ofdma_ru_width =
522 					rx_user_status->ofdma_ru_width;
523 				cdp_rx_ppdu->usr_ru_tones_sum +=
524 					rx_stats_peruser->ofdma_ru_width;
525 				rx_stats_peruser->user_index = i;
526 				ru_size = rx_user_status->ofdma_ru_size;
527 				/*
528 				 * max RU size will be equal to
529 				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
530 				 */
531 				if (qdf_unlikely(ru_size >= OFDMA_NUM_RU_SIZE)) {
532 					dp_err("invalid ru_size %d", ru_size);
533 					return;
534 				}
535 				is_data = dp_rx_inc_rusize_cnt(pdev,
536 							       rx_user_status);
537 			}
538 			if (is_data) {
539 				/* counter to get number of MU OFDMA */
540 				pdev->stats.ul_ofdma.data_rx_ppdu++;
541 				pdev->stats.ul_ofdma.data_users[num_users]++;
542 			}
543 		}
544 	}
545 }
546 
547 /**
548  * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
549  * @pdev: pdev ctx
550  * @ppdu_info: ppdu info structure from ppdu ring
551  * @cdp_rx_ppdu: Rx PPDU indication structure
552  *
553  * Return: none
554  */
555 static void
556 dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
557 				   struct hal_rx_ppdu_info *ppdu_info,
558 				   struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
559 {
560 	struct dp_peer *peer;
561 	struct dp_soc *soc = pdev->soc;
562 	uint32_t i;
563 	struct dp_mon_ops *mon_ops;
564 	uint16_t sw_peer_id;
565 	struct mon_rx_user_status *rx_user_status;
566 	uint32_t num_users = ppdu_info->com_info.num_users;
567 
568 	cdp_rx_ppdu->first_data_seq_ctrl =
569 		ppdu_info->rx_status.first_data_seq_ctrl;
570 	cdp_rx_ppdu->frame_ctrl =
571 		ppdu_info->rx_status.frame_control;
572 	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
573 	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
574 	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
575 	/* num mpdu is consolidated and added together in the per-user loop */
576 	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
577 	/* num msdu is consolidated and added together in the per-user loop */
578 	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
579 				 cdp_rx_ppdu->udp_msdu_count +
580 				 cdp_rx_ppdu->other_msdu_count);
581 
582 	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
583 		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;
584 
585 	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
586 		cdp_rx_ppdu->is_ampdu = 1;
587 	else
588 		cdp_rx_ppdu->is_ampdu = 0;
589 	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
590 
591 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
592 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
593 	sw_peer_id = rx_user_status->sw_peer_id;
594 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
595 				     DP_MOD_ID_RX_PPDU_STATS);
596 	if (qdf_unlikely(!peer)) {
597 		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
598 		cdp_rx_ppdu->num_users = 0;
599 		goto end;
600 	}
601 
602 	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
603 		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
604 	cdp_rx_ppdu->peer_id = peer->peer_id;
605 	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
606 
607 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
608 	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
609 	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
610 	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
611 	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
612 	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
613 	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
614 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
615 	else
616 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
617 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
618 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
619 	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
620 				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
621 	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
622 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
623 	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
624 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
625 	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
626 	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
627 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
628 
629 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
630 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
631 	} else if (ppdu_info->rx_status.preamble_type ==
632 			HAL_RX_PKT_TYPE_11AX) {
633 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
634 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
635 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
636 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
637 	}
638 	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
639 	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
640 	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
641 
642 	mon_ops = dp_mon_ops_get(pdev->soc);
643 	if (mon_ops && mon_ops->mon_rx_populate_ppdu_info)
644 		mon_ops->mon_rx_populate_ppdu_info(ppdu_info,
645 						   cdp_rx_ppdu);
646 
647 	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
648 	for (i = 0; i < MAX_CHAIN; i++)
649 		cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];
650 
651 	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;
652 
653 	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;
654 
655 	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
656 
657 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
658 
659 	return;
660 end:
661 	dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu);
662 }
663 
664 /**
665  * dp_mon_eval_avg_rate_filter() - Evaluate the rate value against the filter
666  * @peer: dp peer
667  * @ratekbps: last packet rate in kbps
668  * @avg_rate: average rate for which the new rate is to be evaluated
669  *
670  * Return: true when the average needs to be updated, else false
671  */
672 static inline bool
673 dp_mon_eval_avg_rate_filter(struct dp_peer *peer, uint32_t ratekbps,
674 			    uint32_t avg_rate) {
675 	uint16_t filter_val = 0;
676 
677 	if (qdf_unlikely(!peer || !peer->vdev ||
678 			  !peer->vdev->pdev->soc->wlan_cfg_ctx)) {
679 		return true;
680 	}
681 
682 	filter_val =
683 		peer->vdev->pdev->soc->wlan_cfg_ctx->avg_rate_stats_filter_val;
684 
685 	if (!filter_val || avg_rate < filter_val || ratekbps > filter_val) {
686 		return true;
687 	}
688 	return false;
689 }
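/*
 * Worked example (illustrative, assumed values): with
 * avg_rate_stats_filter_val = 100000 kbps,
 *
 *   ratekbps = 400000, avg_rate =  90000  -> true  (average below filter)
 *   ratekbps = 400000, avg_rate = 300000  -> true  (last rate above filter)
 *   ratekbps =  50000, avg_rate = 300000  -> false (sample filtered out)
 *
 * and with filter_val = 0 the filter is disabled, so every sample updates
 * the average.
 */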
690 
691 /**
692  * dp_rx_rate_stats_update() - Update per-peer rate statistics
693  * @peer: Datapath peer handle
694  * @ppdu: PPDU Descriptor
695  * @user: user index
696  *
697  * Return: None
698  */
699 static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
700 					   struct cdp_rx_indication_ppdu *ppdu,
701 					   uint32_t user)
702 {
703 	uint32_t ratekbps = 0;
704 	uint32_t ppdu_rx_rate = 0;
705 	uint32_t nss = 0;
706 	uint8_t mcs = 0;
707 	uint32_t rix;
708 	uint16_t ratecode = 0;
709 	struct cdp_rx_stats_ppdu_user *ppdu_user = NULL;
710 	struct dp_mon_peer *mon_peer = NULL;
711 
712 	if (!peer || !ppdu)
713 		return;
714 
715 	mon_peer = peer->monitor_peer;
716 	ppdu_user = &ppdu->user[user];
717 
718 	if (!mon_peer)
719 		return;
720 
721 	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) {
722 		if (ppdu_user->nss == 0)
723 			nss = 0;
724 		else
725 			nss = ppdu_user->nss - 1;
726 		mcs = ppdu_user->mcs;
727 
728 		mon_peer->stats.rx.nss_info = ppdu_user->nss;
729 		mon_peer->stats.rx.mcs_info = ppdu_user->mcs;
730 	} else {
731 		if (ppdu->u.nss == 0)
732 			nss = 0;
733 		else
734 			nss = ppdu->u.nss - 1;
735 		mcs = ppdu->u.mcs;
736 
737 		mon_peer->stats.rx.nss_info = ppdu->u.nss;
738 		mon_peer->stats.rx.mcs_info = ppdu->u.mcs;
739 	}
740 
741 	ratekbps = dp_getrateindex(ppdu->u.gi,
742 				   mcs,
743 				   nss,
744 				   ppdu->u.preamble,
745 				   ppdu->u.bw,
746 				   ppdu->punc_bw,
747 				   &rix,
748 				   &ratecode);
749 
750 	if (!ratekbps) {
751 		ppdu->rix = 0;
752 		ppdu_user->rix = 0;
753 		ppdu->rx_ratekbps = 0;
754 		ppdu->rx_ratecode = 0;
755 		ppdu_user->rx_ratekbps = 0;
756 		return;
757 	}
758 
759 	mon_peer->stats.rx.bw_info = ppdu->u.bw;
760 	mon_peer->stats.rx.gi_info = ppdu->u.gi;
761 	mon_peer->stats.rx.preamble_info = ppdu->u.preamble;
762 
763 	ppdu->rix = rix;
764 	ppdu_user->rix = rix;
765 	DP_STATS_UPD(mon_peer, rx.last_rx_rate, ratekbps);
766 	if (qdf_likely(dp_mon_eval_avg_rate_filter(peer, ratekbps,
767 					mon_peer->stats.rx.avg_rx_rate))) {
768 		mon_peer->stats.rx.avg_rx_rate =
769 			dp_ath_rate_lpf(mon_peer->stats.rx.avg_rx_rate,
770 					ratekbps);
771 	}
772 	ppdu_rx_rate = dp_ath_rate_out(mon_peer->stats.rx.avg_rx_rate);
773 	DP_STATS_UPD(mon_peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
774 	ppdu->rx_ratekbps = ratekbps;
775 	ppdu->rx_ratecode = ratecode;
776 	ppdu_user->rx_ratekbps = ratekbps;
777 
778 	if (peer->vdev)
779 		peer->vdev->stats.rx.last_rx_rate = ratekbps;
780 }
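/*
 * Example of the NSS encoding used above (illustrative, assumed values):
 * for an SU PPDU with ppdu->u.nss = 2 and ppdu->u.mcs = 7, the lookup is
 *
 *   ratekbps = dp_getrateindex(ppdu->u.gi, 7, 1, ppdu->u.preamble,
 *                              ppdu->u.bw, ppdu->punc_bw, &rix, &ratecode);
 *
 * i.e. nss is passed zero-based. A return of 0 kbps is treated as a
 * rate-table lookup failure and clears the cached rate fields instead of
 * polluting the running average.
 */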
781 
782 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
783 static void
784 dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
785 				 struct dp_peer *peer,
786 				 struct cdp_rx_indication_ppdu *ppdu_desc,
787 				 struct cdp_rx_stats_ppdu_user *user)
788 {
789 	uint32_t nss_ru_width_sum = 0;
790 	struct dp_mon_peer *mon_peer = NULL;
791 	uint8_t ac = 0;
792 
793 	if (!pdev || !ppdu_desc || !user || !peer)
794 		return;
795 
796 	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
797 	if (!nss_ru_width_sum)
798 		nss_ru_width_sum = 1;
799 
800 	if (ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
801 	    ppdu_desc->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
802 		user->rx_time_us = (ppdu_desc->duration *
803 				    user->nss * user->ofdma_ru_width) /
804 				    nss_ru_width_sum;
805 	} else {
806 		user->rx_time_us = ppdu_desc->duration;
807 	}
808 
809 	mon_peer = peer->monitor_peer;
810 	if (qdf_unlikely(!mon_peer))
811 		return;
812 
813 	ac = TID_TO_WME_AC(user->tid);
814 	DP_STATS_INC(mon_peer, airtime_stats.rx_airtime_consumption[ac].consumption,
815 		     user->rx_time_us);
816 }
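/*
 * Worked example (illustrative, assumed values): for an MU-OFDMA PPDU of
 * duration 1000 us carrying two users, each with nss = 1 and
 * ofdma_ru_width = 106 tones, the populate path yields usr_nss_sum = 2
 * and usr_ru_tones_sum = 212, so each user is charged
 *
 *   rx_time_us = (1000 * 1 * 106) / (2 * 212) = 250 us
 *
 * whereas for SU receptions each user is charged the full PPDU duration.
 */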
817 
818 /**
819  * dp_rx_mon_update_user_deter_stats() - Update per-peer deterministic stats
820  * @pdev: Datapath pdev handle
821  * @peer: Datapath peer handle
822  * @ppdu: PPDU Descriptor
823  * @user: Per user RX stats
824  *
825  * Return: None
826  */
827 static inline
828 void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev,
829 				       struct dp_peer *peer,
830 				       struct cdp_rx_indication_ppdu *ppdu,
831 				       struct cdp_rx_stats_ppdu_user *user)
832 {
833 	struct dp_mon_peer *mon_peer;
834 	uint8_t tid;
835 
836 	if (!pdev || !ppdu || !user || !peer)
837 		return;
838 
839 	if (!dp_is_subtype_data(ppdu->frame_ctrl))
840 		return;
841 
842 	if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU)
843 		return;
844 
845 	mon_peer = peer->monitor_peer;
846 	if (!mon_peer)
847 		return;
848 
849 	tid = user->tid;
850 	if (tid >= CDP_DATA_TID_MAX)
851 		return;
852 
853 	DP_STATS_INC(mon_peer,
854 		     deter_stats.deter[tid].rx_det.mode_cnt,
855 		     1);
856 	DP_STATS_UPD(mon_peer,
857 		     deter_stats.deter[tid].rx_det.avg_rate,
858 		     mon_peer->stats.rx.avg_rx_rate);
859 }
860 
861 /**
862  * dp_rx_mon_update_pdev_deter_stats() - Update pdev deterministic stats
863  * @pdev: Datapath pdev handle
864  * @ppdu: PPDU Descriptor
865  *
866  * Return: None
867  */
868 static inline
869 void dp_rx_mon_update_pdev_deter_stats(struct dp_pdev *pdev,
870 				       struct cdp_rx_indication_ppdu *ppdu)
871 {
872 	if (!dp_is_subtype_data(ppdu->frame_ctrl))
873 		return;
874 
875 	DP_STATS_INC(pdev,
876 		     deter_stats.rx_su_cnt,
877 		     1);
878 }
879 #else
880 static inline void
881 dp_ppdu_desc_user_rx_time_update(struct dp_pdev *pdev,
882 				 struct dp_peer *peer,
883 				 struct cdp_rx_indication_ppdu *ppdu_desc,
884 				 struct cdp_rx_stats_ppdu_user *user)
885 { }
886 
887 static inline
888 void dp_rx_mon_update_user_deter_stats(struct dp_pdev *pdev,
889 				       struct dp_peer *peer,
890 				       struct cdp_rx_indication_ppdu *ppdu,
891 				       struct cdp_rx_stats_ppdu_user *user)
892 { }
893 
894 static inline
895 void dp_rx_mon_update_pdev_deter_stats(struct dp_pdev *pdev,
896 				       struct cdp_rx_indication_ppdu *ppdu)
897 { }
898 #endif
899 
900 static void dp_rx_stats_update(struct dp_pdev *pdev,
901 			       struct cdp_rx_indication_ppdu *ppdu)
902 {
903 	struct dp_soc *soc = NULL;
904 	uint8_t mcs, preamble, ac = 0, nss, ppdu_type, res_mcs = 0;
905 	uint32_t num_msdu;
906 	struct dp_peer *peer;
907 	struct dp_mon_peer *mon_peer;
908 	struct cdp_rx_stats_ppdu_user *ppdu_user;
909 	uint32_t i;
910 	enum cdp_mu_packet_type mu_pkt_type;
911 	struct dp_mon_ops *mon_ops;
912 	struct dp_mon_pdev *mon_pdev = NULL;
913 	uint64_t byte_count;
914 	bool is_preamble_valid = true;
915 
916 	if (qdf_likely(pdev))
917 		soc = pdev->soc;
918 	else
919 		return;
920 
921 	if (qdf_unlikely(!soc) || soc->process_rx_status)
922 		return;
923 
924 	mon_pdev = pdev->monitor_pdev;
925 
926 	preamble = ppdu->u.preamble;
927 	ppdu_type = ppdu->u.ppdu_type;
928 
929 	for (i = 0; i < ppdu->num_users && i < CDP_MU_MAX_USERS; i++) {
930 		peer = NULL;
931 		ppdu_user = &ppdu->user[i];
932 		peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id,
933 					     DP_MOD_ID_RX_PPDU_STATS);
934 
935 		if (qdf_unlikely(!peer))
936 			mon_peer = mon_pdev->invalid_mon_peer;
937 		else
938 			mon_peer = peer->monitor_peer;
939 
940 		if (qdf_unlikely(!mon_peer)) {
941 			if (peer)
942 				dp_peer_unref_delete(peer,
943 						     DP_MOD_ID_RX_PPDU_STATS);
944 
945 			continue;
946 		}
947 
948 		if ((preamble == DOT11_A) || (preamble == DOT11_B))
949 			ppdu->u.nss = 1;
950 
951 		if (ppdu_type == HAL_RX_TYPE_SU) {
952 			mcs = ppdu->u.mcs;
953 			nss = ppdu->u.nss;
954 		} else {
955 			mcs = ppdu_user->mcs;
956 			nss = ppdu_user->nss;
957 		}
958 
959 		num_msdu = ppdu_user->num_msdu;
960 		byte_count = ppdu_user->mpdu_ok_byte_count +
961 			ppdu_user->mpdu_err_byte_count;
962 
963 		DP_STATS_UPD(mon_peer, rx.snr, ppdu->rssi);
964 
965 		if (qdf_unlikely(mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR))
966 			mon_peer->stats.rx.avg_snr =
967 				CDP_SNR_IN(mon_peer->stats.rx.snr);
968 		else
969 			CDP_SNR_UPDATE_AVG(mon_peer->stats.rx.avg_snr,
970 					   mon_peer->stats.rx.snr);
971 
972 		if (ppdu_type == HAL_RX_TYPE_SU) {
973 			if (nss) {
974 				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
975 				DP_STATS_INC(mon_peer, rx.ppdu_nss[nss - 1], 1);
976 			}
977 
978 			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_ok,
979 				     ppdu_user->mpdu_cnt_fcs_ok);
980 			DP_STATS_INC(mon_peer, rx.mpdu_cnt_fcs_err,
981 				     ppdu_user->mpdu_cnt_fcs_err);
982 		}
983 
984 		if (ppdu_type >= HAL_RX_TYPE_MU_MIMO &&
985 		    ppdu_type <= HAL_RX_TYPE_MU_OFDMA) {
986 			if (ppdu_type == HAL_RX_TYPE_MU_MIMO)
987 				mu_pkt_type = TXRX_TYPE_MU_MIMO;
988 			else
989 				mu_pkt_type = TXRX_TYPE_MU_OFDMA;
990 
991 			if (qdf_likely(nss)) {
992 				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
993 				DP_STATS_INC(mon_peer,
994 					rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1],
995 					1);
996 			}
997 
998 			DP_STATS_INC(mon_peer,
999 				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok,
1000 				     ppdu_user->mpdu_cnt_fcs_ok);
1001 			DP_STATS_INC(mon_peer,
1002 				     rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err,
1003 				     ppdu_user->mpdu_cnt_fcs_err);
1004 		}
1005 
1006 		DP_STATS_INC(mon_peer, rx.sgi_count[ppdu->u.gi], num_msdu);
1007 		DP_STATS_INC(mon_peer, rx.bw[ppdu->u.bw], num_msdu);
1008 		DP_STATS_INC(mon_peer, rx.reception_type[ppdu->u.ppdu_type],
1009 			     num_msdu);
1010 		DP_STATS_INC(mon_peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1);
1011 		DP_STATS_INCC(mon_peer, rx.ampdu_cnt, num_msdu,
1012 			      ppdu_user->is_ampdu);
1013 		DP_STATS_INCC(mon_peer, rx.non_ampdu_cnt, num_msdu,
1014 			      !(ppdu_user->is_ampdu));
1015 		DP_STATS_UPD(mon_peer, rx.rx_rate, mcs);
1016 
1017 		switch (preamble) {
1018 		case DOT11_A:
1019 			res_mcs = (mcs < MAX_MCS_11A) ? mcs : (MAX_MCS - 1);
1020 		break;
1021 		case DOT11_B:
1022 			res_mcs = (mcs < MAX_MCS_11B) ? mcs : (MAX_MCS - 1);
1023 		break;
1024 		case DOT11_N:
1025 			res_mcs = (mcs < MAX_MCS_11N) ? mcs : (MAX_MCS - 1);
1026 		break;
1027 		case DOT11_AC:
1028 			res_mcs = (mcs < MAX_MCS_11AC) ? mcs : (MAX_MCS - 1);
1029 		break;
1030 		case DOT11_AX:
1031 			res_mcs = (mcs < MAX_MCS_11AX) ? mcs : (MAX_MCS - 1);
1032 		break;
1033 		default:
1034 			is_preamble_valid = false;
1035 		}
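		/*
		 * Example (illustrative; the MAX_MCS_* bounds are generation
		 * specific): for an HE PPDU (DOT11_AX) with mcs = 9, and
		 * assuming MAX_MCS_11AX is above that, res_mcs stays 9; any
		 * out-of-range mcs is clamped to the last bucket, MAX_MCS - 1,
		 * which serves as the invalid-MCS counter.
		 */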
1036 
1037 		DP_STATS_INCC(mon_peer,
1038 			      rx.pkt_type[preamble].mcs_count[res_mcs], num_msdu,
1039 			      is_preamble_valid);
1040 
1041 		if (preamble == DOT11_AX) {
1042 			DP_STATS_INCC(mon_peer,
1043 				      rx.su_ax_ppdu_cnt.mcs_count[res_mcs], 1,
1044 				      (ppdu_type == HAL_RX_TYPE_SU));
1045 			DP_STATS_INCC(mon_peer,
1046 				      rx.rx_mu[TXRX_TYPE_MU_OFDMA].ppdu.mcs_count[res_mcs],
1047 				      1, (ppdu_type == HAL_RX_TYPE_MU_OFDMA));
1048 			DP_STATS_INCC(mon_peer,
1049 				      rx.rx_mu[TXRX_TYPE_MU_MIMO].ppdu.mcs_count[res_mcs],
1050 				      1, (ppdu_type == HAL_RX_TYPE_MU_MIMO));
1051 		}
1052 
1053 		/*
1054 		 * If invalid TID, it could be a non-qos frame, hence do not
1055 		 * update any AC counters
1056 		 */
1057 		ac = TID_TO_WME_AC(ppdu_user->tid);
1058 
1059 		if (qdf_likely(ppdu->tid != HAL_TID_INVALID)) {
1060 			DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu);
1061 			DP_STATS_INC(mon_peer, rx.wme_ac_type_bytes[ac],
1062 				     byte_count);
1063 		}
1064 
1065 		DP_STATS_INC(mon_peer, rx.rx_ppdus, 1);
1066 		DP_STATS_INC(mon_peer, rx.rx_mpdus,
1067 			(ppdu_user->mpdu_cnt_fcs_ok + ppdu_user->mpdu_cnt_fcs_err));
1068 
1069 		mon_ops = dp_mon_ops_get(soc);
1070 		if (qdf_likely(mon_ops && mon_ops->mon_rx_stats_update))
1071 			mon_ops->mon_rx_stats_update(mon_peer, ppdu, ppdu_user);
1072 
1073 		if (qdf_unlikely(!peer))
1074 			continue;
1075 
1076 		dp_peer_stats_notify(pdev, peer);
1077 		DP_STATS_UPD(mon_peer, rx.last_snr, ppdu->rssi);
1078 
1079 		dp_peer_qos_stats_notify(pdev, ppdu_user);
1080 
1081 		if (dp_is_subtype_data(ppdu->frame_ctrl))
1082 			dp_rx_rate_stats_update(peer, ppdu, i);
1083 
1084 		dp_send_stats_event(pdev, peer, ppdu_user->peer_id);
1085 
1086 		dp_ppdu_desc_user_rx_time_update(pdev, peer, ppdu, ppdu_user);
1087 
1088 		if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx))
1089 			dp_rx_mon_update_user_deter_stats(pdev, peer,
1090 							  ppdu, ppdu_user);
1091 
1092 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
1093 	}
1094 }
1095 
1096 void
1097 dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
1098 			struct hal_rx_ppdu_info *ppdu_info)
1099 {
1100 	qdf_nbuf_t ppdu_nbuf;
1101 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
1102 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1103 	uint64_t size = 0;
1104 	uint8_t num_users = 0;
1105 
1106 	/*
1107 	 * Do not allocate if fcs error,
1108 	 * ast idx invalid / fctl invalid
1109 	 *
1110 	 * In CFR RCC mode - PPDU status TLVs of error pkts are also needed
1111 	 */
1112 	if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt_fcs_ok == 0))
1113 		return;
1114 
1115 	if (qdf_unlikely(mon_pdev->neighbour_peers_added)) {
1116 		if (ppdu_info->nac_info.fc_valid &&
1117 		    ppdu_info->nac_info.to_ds_flag &&
1118 		    ppdu_info->nac_info.mac_addr2_valid) {
1119 			struct dp_neighbour_peer *peer = NULL;
1120 			uint8_t rssi = ppdu_info->rx_status.rssi_comb;
1121 
1122 			qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
1123 			if (mon_pdev->neighbour_peers_added) {
1124 				TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
1125 					      neighbour_peer_list_elem) {
1126 					if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
1127 							 &ppdu_info->nac_info.mac_addr2,
1128 							 QDF_MAC_ADDR_SIZE)) {
1129 						peer->rssi = rssi;
1130 						break;
1131 					}
1132 				}
1133 			}
1134 			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
1135 		} else {
1136 			dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d",
1137 					ppdu_info->nac_info.fc_valid,
1138 					ppdu_info->nac_info.to_ds_flag,
1139 					ppdu_info->nac_info.mac_addr2_valid);
1140 		}
1141 	}
1142 
1143 	/* No need to generate a WDI event when mcopy mode, CFR RCC mode and
1144 	 * enhanced stats are all disabled
1145 	 */
1146 	if (qdf_unlikely(!mon_pdev->mcopy_mode &&
1147 			 !mon_pdev->enhanced_stats_en &&
1148 			 !dp_cfr_rcc_mode_status(pdev)))
1149 		return;
1150 
1151 	if (qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
1152 		dp_update_cfr_dbg_stats(pdev, ppdu_info);
1153 
1154 	if (qdf_unlikely(!ppdu_info->rx_status.frame_control_info_valid ||
1155 			 ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
1156 		if (!(mon_pdev->mcopy_mode ||
1157 		      (dp_bb_captured_chan_status(pdev, ppdu_info) ==
1158 		       QDF_STATUS_SUCCESS)))
1159 			return;
1160 	}
1161 	num_users = ppdu_info->com_info.num_users;
1162 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
1163 	size = sizeof(struct cdp_rx_indication_ppdu) +
1164 		num_users * sizeof(struct cdp_rx_stats_ppdu_user);
1165 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
1166 				   size,
1167 				   0, 0, FALSE);
1168 	if (qdf_likely(ppdu_nbuf)) {
1169 		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(ppdu_nbuf);
1170 
1171 		qdf_mem_zero(cdp_rx_ppdu, size);
1172 		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
1173 		dp_rx_populate_cdp_indication_ppdu(pdev,
1174 						   ppdu_info, cdp_rx_ppdu);
1175 		if (!qdf_unlikely(qdf_nbuf_put_tail(ppdu_nbuf,
1176 				       sizeof(struct cdp_rx_indication_ppdu)))) {
			/* free the nbuf on put_tail failure instead of leaking it */
			qdf_nbuf_free(ppdu_nbuf);
1177 			return;
		}
1178 
1179 		if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) {
1180 			if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_SU)
1181 				dp_rx_mon_update_pdev_deter_stats(pdev,
1182 								  cdp_rx_ppdu);
1183 		}
1184 
1185 		dp_rx_stats_update(pdev, cdp_rx_ppdu);
1186 
1187 		if (qdf_unlikely(cdp_rx_ppdu->peer_id != HTT_INVALID_PEER)) {
1188 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
1189 					     soc, ppdu_nbuf,
1190 					     cdp_rx_ppdu->peer_id,
1191 					     WDI_NO_VAL, pdev->pdev_id);
1192 		} else if (qdf_unlikely(mon_pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev))) {
1193 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
1194 					     ppdu_nbuf, HTT_INVALID_PEER,
1195 					     WDI_NO_VAL, pdev->pdev_id);
1196 		} else {
1197 			qdf_nbuf_free(ppdu_nbuf);
1198 		}
1199 	}
1200 }
1201 #endif/* QCA_ENHANCED_STATS_SUPPORT */
1202 
1203 #ifdef QCA_UNDECODED_METADATA_SUPPORT
1204 #define RX_PHYERR_MASK_GET64(_val1, _val2) (((uint64_t)(_val2) << 32) | (_val1))
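/*
 * Example (illustrative): combining the two 32-bit mask words,
 *
 *   uint64_t mask64 = RX_PHYERR_MASK_GET64(0x00000001, 0x00000002);
 *
 * yields mask64 == 0x0000000200000001ULL, i.e. _val1 supplies error codes
 * 0-31 and _val2 supplies error codes 32-63 of the combined mask.
 */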
1205 /**
1206  * dp_rx_populate_cdp_indication_ppdu_undecoded_metadata() - Populate cdp
1207  * rx indication structure
1208  * @pdev: pdev ctx
1209  * @ppdu_info: ppdu info structure from ppdu ring
1210  * @cdp_rx_ppdu: Rx PPDU indication structure
1211  *
1212  * Return: none
1213  */
1214 static void
1215 dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(struct dp_pdev *pdev,
1216 				struct hal_rx_ppdu_info *ppdu_info,
1217 				struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
1218 {
1219 	uint32_t chain;
1220 
1221 	cdp_rx_ppdu->phyrx_abort = ppdu_info->rx_status.phyrx_abort;
1222 	cdp_rx_ppdu->phyrx_abort_reason =
1223 		ppdu_info->rx_status.phyrx_abort_reason;
1224 
1225 	cdp_rx_ppdu->first_data_seq_ctrl =
1226 		ppdu_info->rx_status.first_data_seq_ctrl;
1227 	cdp_rx_ppdu->frame_ctrl =
1228 		ppdu_info->rx_status.frame_control;
1229 	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
1230 	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
1231 	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
1232 	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
1233 	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
1234 	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
1235 				 cdp_rx_ppdu->udp_msdu_count +
1236 				 cdp_rx_ppdu->other_msdu_count);
1237 
1238 	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
1239 		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;
1240 
1241 	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
1242 		cdp_rx_ppdu->is_ampdu = 1;
1243 	else
1244 		cdp_rx_ppdu->is_ampdu = 0;
1245 	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
1246 
1247 	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
1248 	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
1249 	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
1250 	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
1251 	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
1252 	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
1253 	if (ppdu_info->rx_status.sgi == VHT_SGI_NYSM &&
1254 	    ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC)
1255 		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
1256 	else
1257 		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
1258 
1259 	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
1260 	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
1261 	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
1262 				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
1263 
1264 	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
1265 	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
1266 	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
1267 	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
1268 	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
1269 	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
1270 	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
1271 
1272 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC) {
1273 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.is_stbc;
1274 		cdp_rx_ppdu->vht_no_txop_ps =
1275 			ppdu_info->rx_status.vht_no_txop_ps;
1276 		cdp_rx_ppdu->vht_crc = ppdu_info->rx_status.vht_crc;
1277 		cdp_rx_ppdu->group_id = ppdu_info->rx_status.vht_flag_values5;
1278 	} else if (ppdu_info->rx_status.preamble_type ==
1279 			HAL_RX_PKT_TYPE_11AX) {
1280 		cdp_rx_ppdu->u.stbc = (ppdu_info->rx_status.he_data3 >>
1281 				       QDF_MON_STATUS_STBC_SHIFT) & 0x1;
1282 		cdp_rx_ppdu->u.dcm = (ppdu_info->rx_status.he_data3 >>
1283 				      QDF_MON_STATUS_DCM_SHIFT) & 0x1;
1284 	} else {
1285 		cdp_rx_ppdu->u.stbc = ppdu_info->rx_status.ht_stbc;
1286 		cdp_rx_ppdu->ht_length = ppdu_info->rx_status.ht_length;
1287 		cdp_rx_ppdu->ht_smoothing = ppdu_info->rx_status.smoothing;
1288 		cdp_rx_ppdu->ht_not_sounding =
1289 			ppdu_info->rx_status.not_sounding;
1290 		cdp_rx_ppdu->ht_aggregation = ppdu_info->rx_status.aggregation;
1291 		cdp_rx_ppdu->ht_stbc = ppdu_info->rx_status.ht_stbc;
1292 		cdp_rx_ppdu->ht_crc = ppdu_info->rx_status.ht_crc;
1293 	}
1294 
1295 	cdp_rx_ppdu->l_sig_length = ppdu_info->rx_status.l_sig_length;
1296 	cdp_rx_ppdu->l_sig_a_parity = ppdu_info->rx_status.l_sig_a_parity;
1297 	cdp_rx_ppdu->l_sig_a_pkt_type = ppdu_info->rx_status.l_sig_a_pkt_type;
1298 
1299 	if (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AX) {
1300 		cdp_rx_ppdu->he_crc = ppdu_info->rx_status.he_crc;
1301 		cdp_rx_ppdu->bss_color_id =
1302 			ppdu_info->rx_status.he_data3 & 0x3F;
1303 		cdp_rx_ppdu->beam_change = (ppdu_info->rx_status.he_data3 >>
1304 				QDF_MON_STATUS_BEAM_CHANGE_SHIFT) & 0x1;
1305 		cdp_rx_ppdu->dl_ul_flag = (ppdu_info->rx_status.he_data3 >>
1306 		QDF_MON_STATUS_DL_UL_SHIFT) & 0x1;
1307 		cdp_rx_ppdu->ldpc_extra_sym = (ppdu_info->rx_status.he_data3 >>
1308 				QDF_MON_STATUS_LDPC_EXTRA_SYMBOL_SHIFT) & 0x1;
1309 		cdp_rx_ppdu->special_reuse =
1310 			ppdu_info->rx_status.he_data4 & 0xF;
1311 		cdp_rx_ppdu->ltf_sym = (ppdu_info->rx_status.he_data5 >>
1312 				QDF_MON_STATUS_HE_LTF_SYM_SHIFT) & 0x7;
1313 		cdp_rx_ppdu->txbf = (ppdu_info->rx_status.he_data5 >>
1314 				QDF_MON_STATUS_TXBF_SHIFT) & 0x1;
1315 		cdp_rx_ppdu->pe_disambiguity = (ppdu_info->rx_status.he_data5 >>
1316 				QDF_MON_STATUS_PE_DISAMBIGUITY_SHIFT) & 0x1;
1317 		cdp_rx_ppdu->pre_fec_pad = (ppdu_info->rx_status.he_data5 >>
1318 				QDF_MON_STATUS_PRE_FEC_PAD_SHIFT) & 0x3;
1319 		cdp_rx_ppdu->dopplar = (ppdu_info->rx_status.he_data6 >>
1320 				QDF_MON_STATUS_DOPPLER_SHIFT) & 0x1;
1321 		cdp_rx_ppdu->txop_duration = (ppdu_info->rx_status.he_data6 >>
1322 				QDF_MON_STATUS_TXOP_SHIFT) & 0x7F;
1323 		cdp_rx_ppdu->sig_b_mcs = ppdu_info->rx_status.he_flags1 & 0x7;
1324 		cdp_rx_ppdu->sig_b_dcm = (ppdu_info->rx_status.he_flags1 >>
1325 				QDF_MON_STATUS_DCM_FLAG_1_SHIFT) & 0x1;
1326 		cdp_rx_ppdu->sig_b_sym = (ppdu_info->rx_status.he_flags2 >>
1327 				QDF_MON_STATUS_NUM_SIG_B_SYMBOLS_SHIFT) & 0xF;
1328 		cdp_rx_ppdu->sig_b_comp = (ppdu_info->rx_status.he_flags2 >>
1329 			QDF_MON_STATUS_SIG_B_COMPRESSION_FLAG_2_SHIFT) & 0x1;
1330 	}
1331 	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu, pdev);
1332 	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
1333 	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
1334 
1335 	cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;
1336 	for (chain = 0; chain < MAX_CHAIN; chain++)
1337 		cdp_rx_ppdu->per_chain_rssi[chain] =
1338 			ppdu_info->rx_status.rssi[chain];
1339 
1340 	cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;
1341 
1342 	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;
1343 
1344 	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
1345 }
1346 
1347 /**
1348  * dp_rx_is_valid_undecoded_frame() - Check whether the received undecoded
1349  * frame is valid against the configured error mask
1350  * @err_mask: configured err mask
1351  * @err_code: Received error reason code for phy abort
1352  *
1353  * Return: true / false
1354  */
1355 static inline bool
1356 dp_rx_is_valid_undecoded_frame(uint64_t err_mask, uint8_t err_code)
1357 {
1358 	if (err_code < CDP_PHYRX_ERR_MAX &&
1359 	    (err_mask & (1ULL << err_code)))
1360 		return true;
1361 
1362 	return false;
1363 }
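/*
 * Example (illustrative): with err_mask = 0x5 (bits 0 and 2 set),
 * err_code = 2 passes while err_code = 1 does not, and any
 * err_code >= CDP_PHYRX_ERR_MAX is rejected regardless of the mask.
 */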
1364 
1365 void
1366 dp_rx_handle_ppdu_undecoded_metadata(struct dp_soc *soc, struct dp_pdev *pdev,
1367 				     struct hal_rx_ppdu_info *ppdu_info)
1368 {
1369 	qdf_nbuf_t ppdu_nbuf;
1370 	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
1371 	uint8_t abort_reason = 0;
1372 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1373 	uint64_t mask64;
1374 
1375 	/* Return if RX_ABORT is not set */
1376 	if (ppdu_info->rx_status.phyrx_abort == 0)
1377 		return;
1378 
1379 	mask64 = RX_PHYERR_MASK_GET64(mon_pdev->phyrx_error_mask,
1380 				      mon_pdev->phyrx_error_mask_cont);
1381 	abort_reason = ppdu_info->rx_status.phyrx_abort_reason;
1382 
1383 	if (!dp_rx_is_valid_undecoded_frame(mask64, abort_reason))
1384 		return;
1385 
1386 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
1387 				   sizeof(struct cdp_rx_indication_ppdu),
1388 				   0, 0, FALSE);
1389 	if (ppdu_nbuf) {
1390 		cdp_rx_ppdu = ((struct cdp_rx_indication_ppdu *)
1391 				qdf_nbuf_data(ppdu_nbuf));
1392 
1393 		qdf_mem_zero(cdp_rx_ppdu,
1394 			     sizeof(struct cdp_rx_indication_ppdu));
1395 		dp_rx_populate_cdp_indication_ppdu_undecoded_metadata(pdev,
1396 				ppdu_info, cdp_rx_ppdu);
1397 
1398 		if (!qdf_nbuf_put_tail(ppdu_nbuf,
1399 				       sizeof(struct cdp_rx_indication_ppdu))) {
			/* free the nbuf on put_tail failure instead of leaking it */
			qdf_nbuf_free(ppdu_nbuf);
1400 			return;
1401 		}
1402 
1403 		mon_pdev->rx_mon_stats.rx_undecoded_count++;
1404 		mon_pdev->rx_mon_stats.rx_undecoded_error[abort_reason] += 1;
1405 
1406 		dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC_UNDECODED_METADATA,
1407 				     soc, ppdu_nbuf, HTT_INVALID_PEER,
1408 				     WDI_NO_VAL, pdev->pdev_id);
1409 	}
1410 }
1411 #endif/* QCA_UNDECODED_METADATA_SUPPORT */
1412 
1413 #ifdef QCA_MCOPY_SUPPORT
1414 QDF_STATUS
1415 dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
1416 			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf,
1417 			uint8_t fcs_ok_mpdu_cnt, bool deliver_frame)
1418 {
1419 	uint16_t size = 0;
1420 	struct ieee80211_frame *wh;
1421 	uint32_t *nbuf_data;
1422 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1423 
1424 	if (!ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload)
1425 		return QDF_STATUS_SUCCESS;
1426 
1427 	/* For M_COPY mode only one msdu per ppdu is sent to upper layer */
1428 	if (mon_pdev->mcopy_mode == M_COPY) {
1429 		if (mon_pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
1430 			return QDF_STATUS_SUCCESS;
1431 	}
1432 
1433 	wh = (struct ieee80211_frame *)(ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload + 4);
1434 
1435 	size = (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload -
1436 				qdf_nbuf_data(nbuf));
1437 
1438 	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
1439 		return QDF_STATUS_SUCCESS;
1440 
1441 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1442 	     IEEE80211_FC0_TYPE_MGT) ||
1443 	     ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1444 	     IEEE80211_FC0_TYPE_CTL)) {
1445 		return QDF_STATUS_SUCCESS;
1446 	}
1447 
1448 	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
1449 	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
1450 	/* only retain RX MSDU payload in the skb */
1451 	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].payload_len);
1452 	if (deliver_frame) {
1453 		mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
1454 		dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
1455 				     nbuf, HTT_INVALID_PEER,
1456 				     WDI_NO_VAL, pdev->pdev_id);
1457 	}
1458 	return QDF_STATUS_E_ALREADY;
1459 }
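/*
 * Buffer layout note (derived from the logic above): after the
 * qdf_nbuf_pull_head()/qdf_nbuf_trim_tail() pair, the delivered nbuf
 * begins at first_msdu_payload and its first four bytes are then
 * overwritten with the current ppdu_id, which is consistent with the
 * 802.11 header pointer for the type checks being taken 4 bytes past
 * first_msdu_payload.
 */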
1460 
1461 void
1462 dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev,
1463 			     struct hal_rx_ppdu_info *ppdu_info,
1464 			     qdf_nbuf_t status_nbuf)
1465 {
1466 	QDF_STATUS mcopy_status;
1467 	qdf_nbuf_t nbuf_clone = NULL;
1468 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1469 
1470 	/* If the MPDU end tlv and RX header are received in different buffers,
1471 	 * process the RX header based on fcs status.
1472 	 */
1473 	if (mon_pdev->mcopy_status_nbuf) {
1474 		/* For M_COPY mode only one msdu per ppdu is sent to upper layer */
1475 		if (mon_pdev->mcopy_mode == M_COPY) {
1476 			if (mon_pdev->m_copy_id.rx_ppdu_id ==
1477 			    ppdu_info->com_info.ppdu_id)
1478 				goto end1;
1479 		}
1480 
1481 		if (ppdu_info->is_fcs_passed) {
1482 			nbuf_clone = qdf_nbuf_clone(mon_pdev->mcopy_status_nbuf);
1483 			if (!nbuf_clone) {
1484 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1485 					  QDF_TRACE_LEVEL_ERROR,
1486 					  "Failed to clone nbuf");
1487 				goto end1;
1488 			}
1489 
1490 			mon_pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
1491 			dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
1492 					     nbuf_clone,
1493 					     HTT_INVALID_PEER,
1494 					     WDI_NO_VAL, pdev->pdev_id);
1495 			ppdu_info->is_fcs_passed = false;
1496 		}
1497 end1:
1498 		qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
1499 		mon_pdev->mcopy_status_nbuf = NULL;
1500 	}
1501 
1502 	/* If the MPDU end tlv and RX header are received in different buffers,
1503 	 * preserve the RX header as the fcs status will be received in MPDU
1504 	 * end tlv in the next buffer. So, cache the buffer to be processed in
1505 	 * the next iteration
1506 	 */
1507 	if ((ppdu_info->fcs_ok_cnt + ppdu_info->fcs_err_cnt) !=
1508 	    ppdu_info->com_info.mpdu_cnt) {
1509 		mon_pdev->mcopy_status_nbuf = qdf_nbuf_clone(status_nbuf);
1510 		if (mon_pdev->mcopy_status_nbuf) {
1511 			mcopy_status = dp_rx_handle_mcopy_mode(
1512 							soc, pdev,
1513 							ppdu_info,
1514 							mon_pdev->mcopy_status_nbuf,
1515 							ppdu_info->fcs_ok_cnt,
1516 							false);
1517 			if (mcopy_status == QDF_STATUS_SUCCESS) {
1518 				qdf_nbuf_free(mon_pdev->mcopy_status_nbuf);
1519 				mon_pdev->mcopy_status_nbuf = NULL;
1520 			}
1521 		}
1522 	}
1523 }
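/*
 * Illustrative note on the caching above (not additional driver logic):
 * mcopy_status_nbuf handles the case where one MPDU's RX header and its
 * MPDU end tlv straddle two status buffers:
 *
 *	status buffer N   : ... RX_HEADER (mpdu X)
 *	status buffer N+1 : MPDU_END (mpdu X, fcs verdict) ...
 *
 * Buffer N is cloned and held in mon_pdev->mcopy_status_nbuf until the
 * fcs verdict arrives with buffer N+1, then delivered or dropped.
 */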
1524 
1525 void
1526 dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
1527 			      struct hal_rx_ppdu_info *ppdu_info,
1528 			      uint32_t tlv_status)
1529 {
1530 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1531 
1532 	if (qdf_unlikely(!mon_pdev->mcopy_mode))
1533 		return;
1534 
1535 	/* The fcs status is received in the MPDU end tlv. If the RX header
1536 	 * and its MPDU end tlv are received in different status buffers,
1537 	 * ppdu_info->is_fcs_passed is used to process that header.
1538 	 * If the end tlv is received in the next status buffer, com_info.mpdu_cnt
1539 	 * will be 0 at the time the MPDU end tlv is received, and we update the
1540 	 * is_fcs_passed flag based on ppdu_info->fcs_err.
1541 	 */
1542 	if (tlv_status != HAL_TLV_STATUS_MPDU_END)
1543 		return;
1544 
1545 	if (!ppdu_info->fcs_err) {
1546 		if (ppdu_info->fcs_ok_cnt >
1547 		    HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER) {
1548 			dp_err("No. of MPDUs(%d) per status buff exceeded",
1549 					ppdu_info->fcs_ok_cnt);
1550 			return;
1551 		}
1552 		if (ppdu_info->com_info.mpdu_cnt)
1553 			ppdu_info->fcs_ok_cnt++;
1554 		else
1555 			ppdu_info->is_fcs_passed = true;
1556 	} else {
1557 		if (ppdu_info->com_info.mpdu_cnt)
1558 			ppdu_info->fcs_err_cnt++;
1559 		else
1560 			ppdu_info->is_fcs_passed = false;
1561 	}
1562 }
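/*
 * Summary of the bookkeeping above, derived from the code for
 * readability; on every MPDU end tlv:
 *
 *	fcs_err == 0, mpdu_cnt != 0  ->  fcs_ok_cnt++
 *	fcs_err == 0, mpdu_cnt == 0  ->  is_fcs_passed = true
 *	fcs_err != 0, mpdu_cnt != 0  ->  fcs_err_cnt++
 *	fcs_err != 0, mpdu_cnt == 0  ->  is_fcs_passed = false
 *
 * mpdu_cnt == 0 means the matching RX header arrived in the previous
 * status buffer, so only the flag is recorded for deferred processing.
 */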
1563 
1564 void
1565 dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
1566 			 struct hal_rx_ppdu_info *ppdu_info,
1567 			 uint32_t tlv_status,
1568 			 qdf_nbuf_t status_nbuf)
1569 {
1570 	QDF_STATUS mcopy_status;
1571 	qdf_nbuf_t nbuf_clone = NULL;
1572 	uint8_t fcs_ok_mpdu_cnt = 0;
1573 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1574 
1575 	dp_rx_mcopy_handle_last_mpdu(soc, pdev, ppdu_info, status_nbuf);
1576 
1577 	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt))
1578 		goto end;
1579 
1580 	if (qdf_unlikely(!ppdu_info->fcs_ok_cnt))
1581 		goto end;
1582 
1583 	/* For M_COPY mode only one msdu per ppdu is sent to upper layer */
1584 	if (mon_pdev->mcopy_mode == M_COPY)
1585 		ppdu_info->fcs_ok_cnt = 1;
1586 
1587 	while (fcs_ok_mpdu_cnt < ppdu_info->fcs_ok_cnt) {
1588 		nbuf_clone = qdf_nbuf_clone(status_nbuf);
1589 		if (!nbuf_clone) {
1590 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1591 				  "Failed to clone nbuf");
1592 			goto end;
1593 		}
1594 
1595 		mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
1596 						       ppdu_info,
1597 						       nbuf_clone,
1598 						       fcs_ok_mpdu_cnt,
1599 						       true);
1600 
1601 		if (mcopy_status == QDF_STATUS_SUCCESS)
1602 			qdf_nbuf_free(nbuf_clone);
1603 
1604 		fcs_ok_mpdu_cnt++;
1605 	}
1606 end:
1607 	qdf_nbuf_free(status_nbuf);
1608 	ppdu_info->fcs_ok_cnt = 0;
1609 	ppdu_info->fcs_err_cnt = 0;
1610 	ppdu_info->com_info.mpdu_cnt = 0;
1611 	qdf_mem_zero(&ppdu_info->ppdu_msdu_info,
1612 		     HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER
1613 		     * sizeof(struct hal_rx_msdu_payload_info));
1614 }
1615 #endif /* QCA_MCOPY_SUPPORT */
1616 
1617 int
1618 dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
1619 			      struct hal_rx_ppdu_info *ppdu_info,
1620 			      qdf_nbuf_t nbuf)
1621 {
1622 	uint8_t size = 0;
1623 	struct dp_mon_vdev *mon_vdev;
1624 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1625 
1626 	if (!mon_pdev->mvdev) {
1627 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1628 			  "[%s]:[%d] Monitor vdev is NULL !!",
1629 			  __func__, __LINE__);
1630 		return 1;
1631 	}
1632 
1633 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
1634 
1635 	if (!ppdu_info->msdu_info.first_msdu_payload) {
1636 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1637 			  "[%s]:[%d] First msdu payload not present",
1638 			  __func__, __LINE__);
1639 		return 1;
1640 	}
1641 
1642 	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
1643 	size = (ppdu_info->msdu_info.first_msdu_payload -
1644 		qdf_nbuf_data(nbuf)) + 4;
1645 	ppdu_info->msdu_info.first_msdu_payload = NULL;
1646 
1647 	if (!qdf_nbuf_pull_head(nbuf, size)) {
1648 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1649 			  "[%s]:[%d] No header present",
1650 			  __func__, __LINE__);
1651 		return 1;
1652 	}
1653 
1654 	/* Only retain RX MSDU payload in the skb */
1655 	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
1656 			   ppdu_info->msdu_info.payload_len);
1657 	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, nbuf,
1658 				      qdf_nbuf_headroom(nbuf))) {
1659 		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
1660 		return 1;
1661 	}
1662 
1663 	mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
1664 			      nbuf, NULL);
1665 	mon_pdev->ppdu_info.rx_status.monitor_direct_used = 0;
1666 	return 0;
1667 }
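/*
 * Buffer layout sketch for the surgery above (illustrative only):
 *
 *	[ status tlvs | phy_ppdu_id (4B) | 802.11 frame | trailing tlvs ]
 *	<------- pull_head(size) ------->                <- trim_tail ->
 *
 * After both operations only the MSDU payload remains, and the radiotap
 * header is then prepended in the nbuf headroom before delivery.
 */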
1668 
1669 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
1670 /**
1671  * dp_rx_mon_stitch_mpdu() - Stitch MPDU from MSDUs
1672  * @mon_pdev: mon_pdev handle
1673  * @tail: 1st MSDU of next MPDU
1674  *
1675  * Return: mpdu buf
1676  */
1677 static qdf_nbuf_t
1678 dp_rx_mon_stitch_mpdu(struct dp_mon_pdev *mon_pdev, qdf_nbuf_t tail)
1679 {
1680 	qdf_nbuf_t head, nbuf, next;
1681 	qdf_nbuf_t mpdu_buf = NULL, head_frag_list = NULL;
1682 	uint32_t is_first_frag, frag_list_sum_len = 0;
1683 
1684 	if (!(qdf_nbuf_is_queue_empty(&mon_pdev->msdu_queue))) {
1685 		head = qdf_nbuf_queue_remove(&mon_pdev->msdu_queue);
1686 		nbuf = head;
1687 		mpdu_buf = qdf_nbuf_copy(head);
1688 		if (qdf_unlikely(!mpdu_buf))
1689 			goto fail;
1690 
1691 		is_first_frag = 1;
1692 
1693 		while (nbuf) {
1694 			/* Find the 1st msdu to append in mpdu_buf->frag_list */
1695 			if (nbuf != head && is_first_frag) {
1696 				is_first_frag = 0;
1697 				head_frag_list  = nbuf;
1698 			}
1699 
1700 			/* calculate frag_list length */
1701 			if (!is_first_frag)
1702 				frag_list_sum_len += qdf_nbuf_len(nbuf);
1703 
1704 			if (qdf_nbuf_queue_first(&mon_pdev->msdu_queue) == tail)
1705 				break;
1706 
1707 			next = qdf_nbuf_queue_remove(&mon_pdev->msdu_queue);
1708 			qdf_nbuf_set_next(nbuf, next);
1709 			nbuf = next;
1710 		}
1711 
1712 		qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
1713 					 frag_list_sum_len);
1714 		qdf_nbuf_free(head);
1715 	}
1716 
1717 	return mpdu_buf;
1718 
1719 fail:
1720 	dp_err_rl("nbuf copy failed len: %d Q1: %d Q2: %d", qdf_nbuf_len(nbuf),
1721 		  qdf_nbuf_queue_len(&mon_pdev->msdu_queue),
1722 		  qdf_nbuf_queue_len(&mon_pdev->mpdu_queue));
1723 
1724 	/* Drop all MSDU of MPDU */
1725 	while (nbuf) {
1726 		qdf_nbuf_free(nbuf);
1727 		if (qdf_nbuf_queue_first(&mon_pdev->msdu_queue) == tail)
1728 			break;
1729 		nbuf = qdf_nbuf_queue_remove(&mon_pdev->msdu_queue);
1730 	}
1731 
1732 	return NULL;
1733 }
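/*
 * Resulting buffer layout (illustrative): the first MSDU is copied into
 * mpdu_buf and every following MSDU of the MPDU hangs off its ext list,
 *
 *	mpdu_buf (copy of MSDU 0)
 *	  ext list: MSDU 1 -> MSDU 2 -> ... (chained via nbuf next)
 *
 * so the stack receives exactly one nbuf per stitched MPDU.
 */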
1734 
1735 /**
1736  * dp_rx_mon_send_mpdu() - Send MPDU to stack
1737  * @pdev: DP pdev handle
1738  * @mon_pdev: mon_pdev handle
1739  * @mpdu_buf: buffer to submit
1740  *
1741  * Return: None
1742  */
1743 static inline void
1744 dp_rx_mon_send_mpdu(struct dp_pdev *pdev, struct dp_mon_pdev *mon_pdev,
1745 		    qdf_nbuf_t mpdu_buf)
1746 {
1747 	struct dp_mon_vdev *mon_vdev;
1748 
1749 	if (qdf_unlikely(!mon_pdev->mvdev)) {
1750 		dp_info_rl("Monitor vdev is NULL !!");
1751 		qdf_nbuf_free(mpdu_buf);
1752 		return;
1753 	}
1754 
1755 	mon_pdev->ppdu_info.rx_status.ppdu_id =
1756 			mon_pdev->ppdu_info.com_info.ppdu_id;
1757 	mon_pdev->ppdu_info.rx_status.device_id = pdev->soc->device_id;
1758 	mon_pdev->ppdu_info.rx_status.chan_noise_floor =
1759 			pdev->chan_noise_floor;
1760 
1761 	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, mpdu_buf,
1762 				      qdf_nbuf_headroom(mpdu_buf))) {
1763 		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
1764 		qdf_nbuf_free(mpdu_buf);
1765 		dp_err("radiotap_update_err");
1766 		return;
1767 	}
1768 
1769 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
1770 	if (qdf_likely(mon_vdev && mon_vdev->osif_rx_mon))
1771 		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
1772 				      mpdu_buf, NULL);
1773 	else
1774 		qdf_nbuf_free(mpdu_buf);
1775 }
1776 
1777 int dp_rx_handle_local_pkt_capture(struct dp_pdev *pdev,
1778 				   struct hal_rx_ppdu_info *ppdu_info,
1779 				   qdf_nbuf_t nbuf, uint32_t tlv_status)
1780 {
1781 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1782 	qdf_nbuf_t buf, last;
1783 	uint16_t size;
1784 
1785 	qdf_spin_lock_bh(&mon_pdev->lpc_lock);
1786 	switch (tlv_status) {
1787 	case HAL_TLV_STATUS_MPDU_START:
1788 	{
1789 		/* Only add an MPDU to the queue if multiple MPDUs are present in the PPDU */
1790 		if (qdf_unlikely(mon_pdev->first_mpdu)) {
1791 			mon_pdev->first_mpdu = false;
1792 			break;
1793 		}
1794 
1795 		/* last nbuf of queue points to 1st MSDU of next MPDU */
1796 		last = qdf_nbuf_queue_last(&mon_pdev->msdu_queue);
1797 		buf = dp_rx_mon_stitch_mpdu(mon_pdev, last);
1798 		/* Add MPDU to queue */
1799 		if (qdf_likely(buf))
1800 			qdf_nbuf_queue_add(&mon_pdev->mpdu_queue, buf);
1801 		break;
1802 	}
1803 
1804 	case HAL_TLV_STATUS_HEADER:
1805 	{
1806 		buf = qdf_nbuf_clone(nbuf);
1807 		if (qdf_unlikely(!buf))
1808 			break;
1809 
1810 		/* Adding phy_ppdu_id_size bytes to get to start of
1811 		 * 802.11 frame after phy_ppdu_id
1812 		 */
1813 		size = (ppdu_info->msdu_info.first_msdu_payload -
1814 			qdf_nbuf_data(buf)) + mon_pdev->phy_ppdu_id_size;
1815 
1816 		if (qdf_unlikely(!qdf_nbuf_pull_head(buf, size))) {
1817 			qdf_nbuf_free(buf);
1818 			dp_info("No header present");
1819 			break;
1820 		}
1821 
1822 		/* Only retain RX MSDU payload in the skb */
1823 		qdf_nbuf_trim_tail(buf, qdf_nbuf_len(buf) -
1824 				ppdu_info->msdu_info.payload_len +
1825 				mon_pdev->phy_ppdu_id_size);
1826 
1827 		/* Add MSDU to Queue */
1828 		qdf_nbuf_queue_add(&mon_pdev->msdu_queue, buf);
1829 		break;
1830 	}
1831 
1832 	case HAL_TLV_STATUS_PPDU_DONE:
1833 	{
1834 		while ((buf = qdf_nbuf_queue_remove(&mon_pdev->mpdu_queue)))
1835 			dp_rx_mon_send_mpdu(pdev, mon_pdev, buf);
1836 
1837 		/* Stitch and send last MPDU of PPDU */
1838 		buf = dp_rx_mon_stitch_mpdu(mon_pdev, NULL);
1839 		if (buf)
1840 			dp_rx_mon_send_mpdu(pdev, mon_pdev, buf);
1841 
1842 		mon_pdev->first_mpdu = true;
1843 		break;
1844 	}
1845 
1846 	default:
1847 		break;
1848 	}
1849 
1850 	qdf_spin_unlock_bh(&mon_pdev->lpc_lock);
1851 
1852 	return 0;
1853 }
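/*
 * TLV-driven flow of the handler above (derived from the switch cases):
 *
 *	HEADER     -> clone the MSDU payload and queue it on msdu_queue
 *	MPDU_START -> stitch all queued MSDUs except the newest one
 *	              (it belongs to the next MPDU) onto mpdu_queue
 *	PPDU_DONE  -> flush mpdu_queue to the stack, stitch and send the
 *	              final MPDU, and re-arm first_mpdu for the next PPDU
 */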
1854 #endif
1855 
1856 qdf_nbuf_t
1857 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
1858 {
1859 	uint8_t *buf;
1860 	int32_t nbuf_retry_count;
1861 	QDF_STATUS ret;
1862 	qdf_nbuf_t nbuf = NULL;
1863 
1864 	for (nbuf_retry_count = 0; nbuf_retry_count <
1865 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
1866 			nbuf_retry_count++) {
1867 		/* Allocate a new skb, bypassing the nbuf recycler */
1868 		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
1869 						  RX_MON_STATUS_BUF_RESERVATION,
1870 						  RX_DATA_BUFFER_ALIGNMENT);
1871 
1872 		if (!nbuf) {
1873 			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
1874 			continue;
1875 		}
1876 
1877 		buf = qdf_nbuf_data(nbuf);
1878 
1879 		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);
1880 
1881 		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
1882 						 QDF_DMA_FROM_DEVICE,
1883 						 RX_MON_STATUS_BUF_SIZE);
1884 
1885 		/* nbuf map failed */
1886 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
1887 			qdf_nbuf_free(nbuf);
1888 			DP_STATS_INC(pdev, replenish.map_err, 1);
1889 			continue;
1890 		}
1891 		/* qdf_nbuf alloc and map succeeded */
1892 		break;
1893 	}
1894 
1895 	/* nbuf alloc or map failed even after all retries */
1896 	if (qdf_unlikely(nbuf_retry_count >=
1897 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
1898 		return NULL;
1899 
1900 	return nbuf;
1901 }
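/*
 * Usage sketch for dp_rx_nbuf_prepare() (hypothetical caller, shown for
 * illustration only):
 *
 *	qdf_nbuf_t status_nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *
 *	if (qdf_unlikely(!status_nbuf))
 *		return QDF_STATUS_E_NOMEM;	// all retries exhausted
 *
 * On success the nbuf is zeroed and DMA-mapped (QDF_DMA_FROM_DEVICE)
 * for RX_MON_STATUS_BUF_SIZE bytes, ready to post to the status ring.
 */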
1902 
1903 #ifndef DISABLE_MON_CONFIG
1904 uint32_t
1905 dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
1906 	       uint32_t mac_id, uint32_t quota)
1907 {
1908 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1909 
1910 	if (mon_soc && mon_soc->mon_rx_process)
1911 		return mon_soc->mon_rx_process(soc, int_ctx,
1912 					       mac_id, quota);
1913 	return 0;
1914 }
1915 #else
1916 uint32_t
1917 dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
1918 	       uint32_t mac_id, uint32_t quota)
1919 {
1920 	return 0;
1921 }
1922 #endif
1923 
1924 /**
1925  * dp_send_mgmt_packet_to_stack() - send indication to upper layers
1926  *
1927  * @soc: soc handle
1928  * @nbuf: Mgmt packet
1929  * @pdev: pdev handle
1930  *
1931  * Return: QDF_STATUS_SUCCESS on success
1932  *         QDF_STATUS_E_INVAL on error
1933  */
1934 #ifdef QCA_MCOPY_SUPPORT
1935 static inline QDF_STATUS
1936 dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
1937 			     qdf_nbuf_t nbuf,
1938 			     struct dp_pdev *pdev)
1939 {
1940 	uint32_t *nbuf_data;
1941 	struct ieee80211_frame *wh;
1942 	qdf_frag_t addr;
1943 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1944 
1945 	if (!nbuf)
1946 		return QDF_STATUS_E_INVAL;
1947 
1948 	/* Get addr pointing to 802.11 header */
1949 	addr = dp_rx_mon_get_nbuf_80211_hdr(nbuf);
1950 	if (qdf_unlikely(!addr)) {
1951 		qdf_nbuf_free(nbuf);
1952 		return QDF_STATUS_E_INVAL;
1953 	}
1954 
1955 	/* Drop the frame if it is neither a mgmt nor a ctrl packet */
1956 	wh = (struct ieee80211_frame *)addr;
1957 	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
1958 	     IEEE80211_FC0_TYPE_MGT) &&
1959 	     ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
1960 	     IEEE80211_FC0_TYPE_CTL)) {
1961 		qdf_nbuf_free(nbuf);
1962 		return QDF_STATUS_E_INVAL;
1963 	}
1964 	nbuf_data = (uint32_t *)qdf_nbuf_push_head(nbuf, 4);
1965 	if (!nbuf_data) {
1966 		QDF_TRACE(QDF_MODULE_ID_DP,
1967 			  QDF_TRACE_LEVEL_ERROR,
1968 			  FL("No headroom"));
1969 		qdf_nbuf_free(nbuf);
1970 		return QDF_STATUS_E_INVAL;
1971 	}
1972 	*nbuf_data = mon_pdev->ppdu_info.com_info.ppdu_id;
1973 
1974 	dp_wdi_event_handler(WDI_EVENT_RX_MGMT_CTRL, soc, nbuf,
1975 			     HTT_INVALID_PEER,
1976 			     WDI_NO_VAL, pdev->pdev_id);
1977 	return QDF_STATUS_SUCCESS;
1978 }
1979 #else
1980 static inline QDF_STATUS
1981 dp_send_mgmt_packet_to_stack(struct dp_soc *soc,
1982 			     qdf_nbuf_t nbuf,
1983 			     struct dp_pdev *pdev)
1984 {
1985 	return QDF_STATUS_SUCCESS;
1986 }
1987 #endif /* QCA_MCOPY_SUPPORT */
1988 
1989 QDF_STATUS dp_rx_mon_process_dest_pktlog(struct dp_soc *soc,
1990 					 uint32_t mac_id,
1991 					 qdf_nbuf_t mpdu)
1992 {
1993 	uint32_t event, msdu_timestamp = 0;
1994 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1995 	void *data;
1996 	struct ieee80211_frame *wh;
1997 	uint8_t type, subtype;
1998 	struct dp_mon_pdev *mon_pdev;
1999 
2000 	if (!pdev)
2001 		return QDF_STATUS_E_INVAL;
2002 
2003 	mon_pdev = pdev->monitor_pdev;
2004 
2005 	if (mon_pdev->rx_pktlog_cbf) {
2006 		if (qdf_nbuf_get_nr_frags(mpdu))
2007 			data = qdf_nbuf_get_frag_addr(mpdu, 0);
2008 		else
2009 			data = qdf_nbuf_data(mpdu);
2010 
2011 		/* CBF logging is required irrespective of full mode or
2012 		 * lite mode.
2013 		 * Need to look for an mpdu with:
2014 		 * type = MGMT, subtype = ACTION NO ACK in the header
2015 		 */
2016 		event = WDI_EVENT_RX_CBF;
2017 
2018 		wh = (struct ieee80211_frame *)data;
2019 		type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2020 		subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2021 		if (type == IEEE80211_FC0_TYPE_MGT &&
2022 		    subtype == IEEE80211_FCO_SUBTYPE_ACTION_NO_ACK) {
2023 			msdu_timestamp = mon_pdev->ppdu_info.rx_status.tsft;
2024 			dp_rx_populate_cbf_hdr(soc,
2025 					       mac_id, event,
2026 					       mpdu,
2027 					       msdu_timestamp);
2028 		}
2029 	}
2030 	return QDF_STATUS_SUCCESS;
2031 }
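/*
 * Illustrative frame-control match for the CBF filter above: an Action
 * No Ack frame carries fc[0] = 0xe0 (type bits = MGT, subtype bits =
 * 0xe0), which is the only combination forwarded to the CBF pktlog.
 */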
2032 
2033 QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
2034 			     qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
2035 {
2036 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2037 	struct cdp_mon_status *rs;
2038 	qdf_nbuf_t mon_skb, skb_next;
2039 	qdf_nbuf_t mon_mpdu = NULL;
2040 	struct dp_mon_vdev *mon_vdev;
2041 	struct dp_mon_pdev *mon_pdev;
2042 
2043 	if (!pdev)
2044 		goto mon_deliver_fail;
2045 
2046 	mon_pdev = pdev->monitor_pdev;
2047 	rs = &mon_pdev->rx_mon_recv_status;
2048 
2049 	if (!mon_pdev->mvdev && !mon_pdev->mcopy_mode &&
2050 	    !mon_pdev->rx_pktlog_cbf)
2051 		goto mon_deliver_fail;
2052 
2053 	/* restitch mon MPDU for delivery via monitor interface */
2054 	mon_mpdu = dp_rx_mon_restitch_mpdu(soc, mac_id, head_msdu,
2055 					   tail_msdu, rs);
2056 
2057 	/* If MPDU restitch fails, free buffers */
2058 	if (!mon_mpdu) {
2059 		dp_info("MPDU restitch failed, free buffers");
2060 		goto mon_deliver_fail;
2061 	}
2062 
2063 	dp_rx_mon_process_dest_pktlog(soc, mac_id, mon_mpdu);
2064 
2065 	/* A monitor vap cannot be present when mcopy is enabled,
2066 	 * hence the same skb can be consumed
2067 	 */
2068 	if (mon_pdev->mcopy_mode)
2069 		return dp_send_mgmt_packet_to_stack(soc, mon_mpdu, pdev);
2070 
2071 	if (mon_pdev->mvdev &&
2072 	    mon_pdev->mvdev->osif_vdev &&
2073 	    mon_pdev->mvdev->monitor_vdev &&
2074 	    mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
2075 		mon_vdev = mon_pdev->mvdev->monitor_vdev;
2076 
2077 		mon_pdev->ppdu_info.rx_status.ppdu_id =
2078 			mon_pdev->ppdu_info.com_info.ppdu_id;
2079 		mon_pdev->ppdu_info.rx_status.device_id = soc->device_id;
2080 		mon_pdev->ppdu_info.rx_status.chan_noise_floor =
2081 			pdev->chan_noise_floor;
2082 		dp_handle_tx_capture(soc, pdev, mon_mpdu);
2083 
2084 		if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status,
2085 					      mon_mpdu,
2086 					      qdf_nbuf_headroom(mon_mpdu))) {
2087 			DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
2088 			qdf_nbuf_free(mon_mpdu);
2089 			return QDF_STATUS_E_INVAL;
2090 		}
2091 
2092 		dp_rx_mon_update_pf_tag_to_buf_headroom(soc, mon_mpdu);
2093 		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
2094 				      mon_mpdu,
2095 				      &mon_pdev->ppdu_info.rx_status);
2096 	} else {
2097 		dp_rx_mon_dest_debug("%pK: mon_mpdu=%pK monitor_vdev %pK osif_vdev %pK"
2098 				     , soc, mon_mpdu, mon_pdev->mvdev,
2099 				     (mon_pdev->mvdev ? mon_pdev->mvdev->osif_vdev
2100 				     : NULL));
2101 		qdf_nbuf_free(mon_mpdu);
2102 		return QDF_STATUS_E_INVAL;
2103 	}
2104 
2105 	return QDF_STATUS_SUCCESS;
2106 
2107 mon_deliver_fail:
2108 	mon_skb = head_msdu;
2109 	while (mon_skb) {
2110 		skb_next = qdf_nbuf_next(mon_skb);
2111 
2112 		dp_rx_mon_dest_debug("%pK: [%s][%d] mon_skb=%pK len %u",
2113 				     soc, __func__, __LINE__, mon_skb, mon_skb->len);
2114 
2115 		qdf_nbuf_free(mon_skb);
2116 		mon_skb = skb_next;
2117 	}
2118 	return QDF_STATUS_E_INVAL;
2119 }
2120 
2121 QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc,
2122 				     uint32_t mac_id)
2123 {
2124 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2125 	ol_txrx_rx_mon_fp osif_rx_mon;
2126 	qdf_nbuf_t dummy_msdu;
2127 	struct dp_mon_pdev *mon_pdev;
2128 	struct dp_mon_vdev *mon_vdev;
2129 
2130 	/* Sanity checking */
2131 	if (!pdev || !pdev->monitor_pdev)
2132 		goto mon_deliver_non_std_fail;
2133 
2134 	mon_pdev = pdev->monitor_pdev;
2135 
2136 	if (!mon_pdev->mvdev || !mon_pdev->mvdev->osif_vdev ||
2137 	    !mon_pdev->mvdev->monitor_vdev ||
2138 	    !mon_pdev->mvdev->monitor_vdev->osif_rx_mon)
2139 		goto mon_deliver_non_std_fail;
2140 
2141 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
2142 	/* Generate a dummy sk_buff */
2143 	osif_rx_mon = mon_vdev->osif_rx_mon;
2144 	dummy_msdu = qdf_nbuf_alloc(soc->osdev, MAX_MONITOR_HEADER,
2145 				    MAX_MONITOR_HEADER, 4, FALSE);
2146 	if (!dummy_msdu)
2147 		goto allocate_dummy_msdu_fail;
2148 
2149 	qdf_nbuf_set_pktlen(dummy_msdu, 0);
2150 	qdf_nbuf_set_next(dummy_msdu, NULL);
2151 
2152 	mon_pdev->ppdu_info.rx_status.ppdu_id =
2153 		mon_pdev->ppdu_info.com_info.ppdu_id;
2154 
2155 	/* Apply the radio header to this dummy skb */
2156 	if (!qdf_nbuf_update_radiotap(&mon_pdev->ppdu_info.rx_status, dummy_msdu,
2157 				      qdf_nbuf_headroom(dummy_msdu))) {
2158 		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
2159 		qdf_nbuf_free(dummy_msdu);
2160 		goto mon_deliver_non_std_fail;
2161 	}
2162 
2163 	/* deliver to the user layer application */
2164 	osif_rx_mon(mon_pdev->mvdev->osif_vdev,
2165 		    dummy_msdu, NULL);
2166 
2167 	return QDF_STATUS_SUCCESS;
2168 
2169 allocate_dummy_msdu_fail:
2170 	dp_rx_mon_dest_debug("%pK: mon_skb=%pK ",
2171 			     soc, dummy_msdu);
2172 
2173 mon_deliver_non_std_fail:
2174 	return QDF_STATUS_E_INVAL;
2175 }
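/*
 * Note on the dummy MSDU above: its packet length is set to 0 before
 * the radiotap update, so the frame delivered to osif_rx_mon consists
 * solely of the radiotap header carrying the PPDU-level rx_status.
 */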
2176 
2177 /**
2178  * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
2179  *                                     filtering enabled
2180  * @soc: core txrx main context
2181  * @ppdu_info: Structure for rx ppdu info
2182  * @status_nbuf: Qdf nbuf abstraction for linux skb
2183  * @pdev_id: mac_id/pdev_id correspondingly for MCL and WIN
2184  *
2185  * Return: none
2186  */
2187 void
2188 dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
2189 				struct hal_rx_ppdu_info *ppdu_info,
2190 				qdf_nbuf_t status_nbuf, uint32_t pdev_id)
2191 {
2192 	struct dp_peer *peer;
2193 	struct mon_rx_user_status *rx_user_status;
2194 	uint32_t num_users = ppdu_info->com_info.num_users;
2195 	uint16_t sw_peer_id;
2196 
2197 	/* Sanity check for num_users */
2198 	if (!num_users)
2199 		return;
2200 
2201 	qdf_assert_always(num_users <= CDP_MU_MAX_USERS);
2202 	rx_user_status = &ppdu_info->rx_user_status[num_users - 1];
2203 
2204 	sw_peer_id = rx_user_status->sw_peer_id;
2205 
2206 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
2207 				     DP_MOD_ID_RX_PPDU_STATS);
2208 
2209 	if (!peer)
2210 		return;
2211 
2212 	if ((peer->peer_id != HTT_INVALID_PEER) && (peer->monitor_peer) &&
2213 	    (peer->monitor_peer->peer_based_pktlog_filter)) {
2214 		dp_wdi_event_handler(
2215 				     WDI_EVENT_RX_DESC, soc,
2216 				     status_nbuf,
2217 				     peer->peer_id,
2218 				     WDI_NO_VAL, pdev_id);
2219 	}
2220 	dp_peer_unref_delete(peer,
2221 			     DP_MOD_ID_RX_PPDU_STATS);
2222 }
2223 
2224 uint32_t
2225 dp_mon_rx_add_tlv(uint8_t id, uint16_t len, void *value, qdf_nbuf_t mpdu_nbuf)
2226 {
2227 	uint8_t *dest = NULL;
2228 	uint32_t num_bytes_pushed = 0;
2229 
2230 	/* Add tlv id field */
2231 	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint8_t));
2232 	if (qdf_likely(dest)) {
2233 		*((uint8_t *)dest) = id;
2234 		num_bytes_pushed += sizeof(uint8_t);
2235 	}
2236 
2237 	/* Add tlv len field */
2238 	dest = qdf_nbuf_push_head(mpdu_nbuf, sizeof(uint16_t));
2239 	if (qdf_likely(dest)) {
2240 		*((uint16_t *)dest) = len;
2241 		num_bytes_pushed += sizeof(uint16_t);
2242 	}
2243 
2244 	/* Add tlv value field */
2245 	dest = qdf_nbuf_push_head(mpdu_nbuf, len);
2246 	if (qdf_likely(dest)) {
2247 		qdf_mem_copy(dest, value, len);
2248 		num_bytes_pushed += len;
2249 	}
2250 
2251 	return num_bytes_pushed;
2252 }
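/*
 * Resulting layout after dp_mon_rx_add_tlv(), derived from the push
 * order above (each push_head prepends to the nbuf):
 *
 *	| value (len bytes) | len (u16) | id (u8) | original nbuf data |
 *
 * The return value is the number of bytes actually pushed, i.e.
 * len + 3 when headroom is available for all three fields.
 */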
2253 
2254 void
2255 dp_mon_rx_stats_update_rssi_dbm_params(struct dp_mon_pdev *mon_pdev,
2256 				       struct hal_rx_ppdu_info *ppdu_info)
2257 {
2258 	ppdu_info->rx_status.rssi_offset = mon_pdev->rssi_offsets.rssi_offset;
2259 	ppdu_info->rx_status.rssi_dbm_conv_support =
2260 				mon_pdev->rssi_dbm_conv_support;
2261 	ppdu_info->rx_status.chan_noise_floor =
2262 		mon_pdev->rssi_offsets.rssi_offset;
2263 }
2264 
2265 #ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
2266 void dp_rx_mon_update_user_ctrl_frame_stats(struct dp_pdev *pdev,
2267 					    struct hal_rx_ppdu_info *ppdu_info)
2268 {
2269 	struct dp_peer *peer;
2270 	struct dp_mon_peer *mon_peer;
2271 	struct dp_soc *soc = pdev->soc;
2272 	uint16_t fc, sw_peer_id;
2273 	uint8_t i;
2274 
2275 	if (qdf_unlikely(!ppdu_info))
2276 		return;
2277 
2278 	fc = ppdu_info->nac_info.frame_control;
2279 	if (qdf_likely((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) !=
2280 	    QDF_IEEE80211_FC0_TYPE_CTL))
2281 		return;
2282 
2283 	for (i = 0; i < ppdu_info->com_info.num_users; i++) {
2284 		sw_peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
2285 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
2286 					     DP_MOD_ID_RX_PPDU_STATS);
2287 		if (qdf_unlikely(!peer))
2288 			continue;
2289 		mon_peer = peer->monitor_peer;
2290 		if (qdf_unlikely(!mon_peer)) {
2291 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
2292 			continue;
2293 		}
2294 		DP_STATS_INCC(mon_peer, rx.ndpa_cnt, 1,
2295 			      ppdu_info->ctrl_frm_info[i].ndpa);
2296 		DP_STATS_INCC(mon_peer, rx.bar_cnt, 1,
2297 			      ppdu_info->ctrl_frm_info[i].bar);
2298 
2299 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
2300 	}
2301 }
2302 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
2303