/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <dp_types.h>
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_internal.h>
#include "htt_ppdu_stats.h"
#include "dp_cal_client_api.h"
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif
#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif
#include "dp_mon_1.0.h"
#ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
#include "dp_mon_2.0.h"
#include "dp_mon_filter_2.0.h"
#endif

#define DP_INTR_POLL_TIMER_MS 5
#define INVALID_FREE_BUFF 0xffffffff

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define MAX_STRING_LEN_PER_FIELD 6
#define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
#endif

#ifdef QCA_MCOPY_SUPPORT
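/**
 * dp_pdev_disable_mcopy_code() - Clear the M_copy mode state on a pdev
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */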
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->mcopy_mode = M_COPY_DISABLED;
	mon_pdev->mvdev = NULL;
}

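/**
 * dp_reset_mcopy_mode() - Disable M_copy mode and reset its filters
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */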
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mcopy_mode) {
		cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
		if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
			cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
							  DP_FULL_MON_ENABLE);
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to reset M_copy mode filters"));
		}
		mon_pdev->monitor_configured = false;
	}
}

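/**
 * dp_config_mcopy_mode() - Enable M_copy mode and set up its filters
 * @pdev: Datapath pdev handle
 * @val: M_copy mode value to configure
 *
 * Return: QDF_STATUS
 */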
static QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mvdev)
		return QDF_STATUS_E_RESOURCES;

	mon_pdev->mcopy_mode = val;
	mon_pdev->tx_sniffer_enable = 0;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
		if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings)
			mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
	}

	/*
	 * Setup the M copy mode filter.
	 */
	cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mcopy_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set M_copy mode filters"));
		dp_mon_filter_reset_mcopy_mode(pdev);
		dp_pdev_disable_mcopy_code(pdev);
		return status;
	}

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return status;
}
#else
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_MCOPY_SUPPORT */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
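/**
 * dp_reset_undecoded_metadata_capture() - Disable undecoded metadata capture
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS
 */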
static QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->undecoded_metadata_capture) {
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Undecoded capture filter reset failed"));
		}
	}
	mon_pdev->undecoded_metadata_capture = 0;
	return status;
}

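/**
 * dp_enable_undecoded_metadata_capture() - Enable undecoded metadata capture
 * @pdev: Datapath pdev handle
 * @val: Undecoded capture config value
 *
 * Return: QDF_STATUS
 */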
static QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev) {
		qdf_err("monitor vdev is NULL");
		return QDF_STATUS_E_RESOURCES;
	}

	mon_pdev->undecoded_metadata_capture = val;
	mon_pdev->monitor_configured = true;

	/* Setup the undecoded metadata capture mode filter. */
	dp_mon_filter_setup_undecoded_metadata_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set Undecoded capture filters"));
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		return status;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */

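/**
 * dp_reset_monitor_mode() - Disable monitor mode with the mon lock held
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Takes mon_pdev->mon_lock and delegates to dp_reset_monitor_mode_unlock().
 *
 * Return: QDF_STATUS
 */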
QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id,
				 uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	status = dp_reset_monitor_mode_unlock(soc_hdl, pdev_id,
					      special_monitor);
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	return status;
}

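/**
 * dp_reset_monitor_mode_unlock() - Disable monitor mode without taking
 *                                  the mon lock
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Same as dp_reset_monitor_mode() but the caller is expected to hold
 * mon_pdev->mon_lock.
 *
 * Return: QDF_STATUS
 */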
QDF_STATUS dp_reset_monitor_mode_unlock(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id,
					uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode) {
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_DISABLE);
		mon_pdev->hold_mon_dest_ring = false;
		mon_pdev->is_bkpressure = false;
		mon_pdev->set_reset_mon = false;
#if defined(QCA_SUPPORT_FULL_MON)
		if (mon_pdev->mon_desc)
			qdf_mem_zero(mon_pdev->mon_desc,
				     sizeof(struct hal_rx_mon_desc_info));
#endif
	}

	/*
	 * Lite monitor mode, smart monitor mode and monitor
	 * mode all use this API to reset filters and disable
	 * the mode.
	 */
	if (mon_pdev->mcopy_mode) {
#if defined(QCA_MCOPY_SUPPORT)
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* QCA_MCOPY_SUPPORT */
	} else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
		dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
		/* For mon 2.0 we make use of lite mon to
		 * set filters for the smart monitor use case.
		 */
		dp_monitor_lite_mon_disable_rx(pdev);
	} else if (mon_pdev->undecoded_metadata_capture) {
#ifdef QCA_UNDECODED_METADATA_SUPPORT
		dp_reset_undecoded_metadata_capture(pdev);
#endif
	} else {
		dp_mon_filter_reset_mon_mode(pdev);
	}
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
				   soc);
	}

	mon_pdev->mvdev = NULL;
	mon_pdev->monitor_configured = false;

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
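/**
 * dp_pdev_set_advance_monitor_filter() - Set advance monitor filter
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @filter_val: Filter configuration for monitor mode
 *
 * Return: QDF_STATUS
 */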
QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time.
	 */
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	vdev = mon_pdev->mvdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		  pdev, pdev_id, soc, vdev);

	/* Check if the current pdev's monitor_vdev exists */
	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	mon_pdev->mon_filter_mode = filter_val->mode;
	mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	mon_pdev->fp_data_filter = filter_val->fp_data;
	mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	mon_pdev->mo_data_filter = filter_val->mo_data;

	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
				   soc);
		dp_mon_filter_reset_mon_mode(pdev);
	}

	return status;
}
#endif

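/**
 * dp_deliver_tx_mgmt() - Deliver a Tx management frame to monitor consumers
 * @cdp_soc: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @nbuf: Management frame buffer
 *
 * Return: QDF_STATUS
 */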
QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	dp_deliver_mgmt_frm(pdev, nbuf);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: 0 on success, not 0 on failure
 */
static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	mon_vdev->scan_spcl_vap_stats =
		qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));

	if (!mon_vdev->scan_spcl_vap_stats) {
		dp_mon_err("scan spcl vap stats attach fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: void
 */
static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
	if (mon_vdev->scan_spcl_vap_stats) {
		qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
		mon_vdev->scan_spcl_vap_stats = NULL;
	}
}

/**
 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
 * @vdev: Datapath VDEV handle
 *
 * Return: void
 */
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev;

	mon_pdev = vdev->pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
		return;

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));
}

/**
 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @stats: structure to hold spcl vap stats
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   struct cdp_scan_spcl_vap_stats *stats)
{
	struct dp_mon_vdev *mon_vdev = NULL;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev || !stats) {
		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#else
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
}

static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
}
#endif

/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @dp_soc: DP soc context
 * @vdev_id: vdev ID
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
				    uint8_t vdev_id,
				    uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (!pdev || !pdev->monitor_pdev) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	mon_pdev = pdev->monitor_pdev;

	mon_pdev->mvdev = vdev;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		  pdev, pdev->pdev_id, pdev->soc, vdev);

	/*
	 * Do not configure the monitor buf ring and filter for smart and
	 * lite monitor: for smart monitor, filters are added along with
	 * the first NAC; for lite monitor, the required configuration is
	 * done through dp_set_pdev_param.
	 */
	if (special_monitor) {
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}

	if (mon_pdev->scan_spcl_vap_configured)
		dp_reset_scan_spcl_vap_stats(vdev);

	/* Check if monitor mode is already configured on this pdev */
	if (mon_pdev->monitor_configured) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "monitor vap already created vdev=%pK\n", vdev);
		status = QDF_STATUS_E_RESOURCES;
		goto fail;
	}

	mon_pdev->monitor_configured = true;
	mon_pdev->phy_ppdu_id_size = hal_rx_get_phy_ppdu_id_size(soc->hal_soc);

	/* If the advance monitor filter is applied using lite_mon
	 * via vap configuration, the required filters are already
	 * applied, hence return SUCCESS from here.
	 */
	if (dp_monitor_lite_mon_is_rx_adv_filter_enable(pdev)) {
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}
	/* Disable lite mon if configured; the monitor vap takes
	 * priority over lite mon when it is created. Lite mon
	 * can be configured again later.
	 */
	dp_monitor_lite_mon_disable_rx(pdev);

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to set monitor mode filters", soc);
		dp_mon_filter_reset_mon_mode(pdev);
		mon_pdev->monitor_configured = false;
		mon_pdev->mvdev = NULL;
	}

fail:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

#ifdef QCA_TX_CAPTURE_SUPPORT
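/**
 * dp_config_tx_capture_mode() - Enable Tx sniffer (Tx capture) mode
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS
 */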
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->tx_sniffer_enable = 1;
	mon_pdev->monitor_configured = false;

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
#ifdef QCA_MCOPY_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}
#endif
#endif

#if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
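/**
 * dp_config_debug_sniffer() - Configure the debug sniffer mode
 * @pdev: Datapath pdev handle
 * @val: 0 - disable, 1 - Tx capture, 2/4 - M_copy modes
 *
 * Return: QDF_STATUS
 */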
QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/*
	 * Note: The mirror copy mode cannot co-exist with any other
	 * monitor modes. Hence disabling the filter for this mode will
	 * reset the monitor destination ring filters.
	 */
	dp_reset_mcopy_mode(pdev);
	switch (val) {
	case 0:
		mon_pdev->tx_sniffer_enable = 0;
		mon_pdev->monitor_configured = false;

		/*
		 * We don't need to reset the Rx monitor status ring or call
		 * the API dp_ppdu_ring_reset() when all debug sniffer modes
		 * are disabled. The Rx monitor status ring will be disabled
		 * when the last mode using the monitor status ring gets
		 * disabled.
		 */
		if (!mon_pdev->pktlog_ppdu_stats &&
		    !mon_pdev->enhanced_stats_en &&
		    !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		} else if (!mon_pdev->enhanced_stats_en &&
			   mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		}
		break;

	case 1:
		status = dp_config_tx_capture_mode(pdev);
		break;
	case 2:
	case 4:
		status = dp_config_mcopy_mode(pdev, val);
		break;

	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid value, mode: %d not supported", val);
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}
#endif

#ifdef QCA_UNDECODED_METADATA_SUPPORT
QDF_STATUS
dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) {
		qdf_err("No monitor or Special vap, undecoded capture not supported");
		return QDF_STATUS_E_RESOURCES;
	}

	if (val)
		status = dp_enable_undecoded_metadata_capture(pdev, val);
	else
		status = dp_reset_undecoded_metadata_capture(pdev);

	return status;
}
#endif

/**
 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
 *                                 ring based on target
 * @soc: soc handle
 * @mac_for_pdev: WIN- pdev_id, MCL- mac id
 * @pdev: physical device handle
 * @ring_num: mac id
 * @htt_tlv_filter: tlv filter
 *
 * Return: zero on success, non-zero on failure
 */
static inline QDF_STATUS
dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
			    struct dp_pdev *pdev, uint8_t ring_num,
			    struct htt_rx_ring_tlv_filter htt_tlv_filter)
{
	QDF_STATUS status;

	if (soc->wlan_cfg_ctx->rxdma1_enable)
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     soc->rxdma_mon_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_MONITOR_BUF,
					     RX_MONITOR_BUFFER_SIZE,
					     &htt_tlv_filter);
	else
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     pdev->rx_mac_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					     &htt_tlv_filter);

	return status;
}

/**
 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 *
 * Return: virtual interface id
 */
static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
			 !pdev->monitor_pdev->mvdev))
		return -EINVAL;

	return pdev->monitor_pdev->mvdev->vdev_id;
}

#if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
#ifndef WLAN_TX_PKT_CAPTURE_ENH
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	} else {
		if (!mon_pdev->bpr_enable)
			qdf_nbuf_free(nbuf);
	}
}
#endif
#endif

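/**
 * dp_htt_ppdu_stats_attach() - Allocate the HTT PPDU stats TLV buffer
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS
 */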
QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);

	if (!mon_pdev->ppdu_tlv_buf) {
		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

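/**
 * dp_htt_ppdu_stats_detach() - Free pending PPDU info list entries and the
 *                              HTT PPDU stats TLV buffer
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */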
void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
	struct ppdu_info *ppdu_info, *ppdu_info_next;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->sched_comp_list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	if (mon_pdev->ppdu_tlv_buf)
		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
}

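/**
 * dp_pdev_get_rx_mon_stats() - Copy the pdev Rx monitor stats to the caller
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @stats: structure to hold the Rx monitor stats
 *
 * Return: QDF_STATUS
 */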
QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    struct cdp_pdev_mon_stats *stats)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_copy(stats, &mon_pdev->rx_mon_stats,
		     sizeof(struct cdp_pdev_mon_stats));

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured
 * monitor pdev stats
 * @mon_pdev: Monitor PDEV handle
 * @rx_mon_stats: Monitor pdev status/destination ring stats
 *
 * Return: None
 */
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
	char undecoded_error[DP_UNDECODED_ERR_LENGTH];
	uint8_t index = 0, i;

	DP_PRINT_STATS("Rx Undecoded Frame count:%d",
		       rx_mon_stats->rx_undecoded_count);
	index = 0;
	for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) {
		index += qdf_snprint(&undecoded_error[index],
				     DP_UNDECODED_ERR_LENGTH - index,
				     " %d", rx_mon_stats->rx_undecoded_error[i]);
	}
	DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error);
}
#else
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
}
#endif

static const char *
dp_preamble_type_str[] = {
	"preamble OFDM      ",
	"preamble CCK       ",
	"preamble HT        ",
	"preamble VHT       ",
	"preamble HE        ",
	"preamble EHT       ",
	"preamble NO SUPPORT",
};

static const char *
dp_reception_type_str[] = {
	"reception su        ",
	"reception mu_mimo   ",
	"reception ofdma     ",
	"reception ofdma mimo",
};

static const char *
dp_mu_dl_ul_str[] = {
	"MU DL",
	"MU UL",
};

static inline void
dp_print_pdev_mpdu_fcs_ok_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
			      uint32_t pkt_t, uint32_t rx_t,
			      uint32_t dl_ul, uint32_t user)
{
	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_ok=%d",
		       dp_preamble_type_str[pkt_t],
		       dp_reception_type_str[rx_t],
		       dp_mu_dl_ul_str[dl_ul],
		       user,
		       rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user]);
}

static inline void
dp_print_pdev_mpdu_fcs_err_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
			       uint32_t pkt_t, uint32_t rx_t,
			       uint32_t dl_ul, uint32_t user)
{
	DP_PRINT_STATS("%s, %s, %s, user=%d, mpdu_fcs_err=%d",
		       dp_preamble_type_str[pkt_t],
		       dp_reception_type_str[rx_t],
		       dp_mu_dl_ul_str[dl_ul],
		       user,
		       rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user]);
}

static inline void
dp_print_pdev_mpdu_cnt(struct cdp_pdev_mon_stats *rx_mon_sts,
		       uint32_t pkt_t, uint32_t rx_t,
		       uint32_t dl_ul, uint32_t user)
{
	if (rx_mon_sts->mpdu_cnt_fcs_ok[pkt_t][rx_t][dl_ul][user])
		dp_print_pdev_mpdu_fcs_ok_cnt(rx_mon_sts, pkt_t, rx_t,
					      dl_ul, user);

	if (rx_mon_sts->mpdu_cnt_fcs_err[pkt_t][rx_t][dl_ul][user])
		dp_print_pdev_mpdu_fcs_err_cnt(rx_mon_sts, pkt_t, rx_t,
					       dl_ul, user);
}

static inline void
dp_print_pdev_mpdu_user(struct cdp_pdev_mon_stats *rx_mon_sts,
			uint32_t pkt_t, uint32_t rx_t,
			uint32_t dl_ul)
{
	uint32_t user;

	for (user = 0; user < CDP_MU_SNIF_USER_MAX; user++)
		dp_print_pdev_mpdu_cnt(rx_mon_sts, pkt_t, rx_t,
				       dl_ul, user);
}

static inline void
dp_print_pdev_mpdu_dl_ul(struct cdp_pdev_mon_stats *rx_mon_sts,
			 uint32_t pkt_t, uint32_t rx_t)
{
	uint32_t dl_ul;

	for (dl_ul = CDP_MU_TYPE_DL; dl_ul < CDP_MU_TYPE_MAX; dl_ul++)
		dp_print_pdev_mpdu_user(rx_mon_sts, pkt_t, rx_t,
					dl_ul);
}

static inline void
dp_print_pdev_mpdu_rx_type(struct cdp_pdev_mon_stats *rx_mon_sts,
			   uint32_t pkt_t)
{
	uint32_t rx_t;

	for (rx_t = CDP_RX_TYPE_SU; rx_t < CDP_RX_TYPE_MAX; rx_t++)
		dp_print_pdev_mpdu_dl_ul(rx_mon_sts, pkt_t, rx_t);
}

static inline void
dp_print_pdev_mpdu_pkt_type(struct cdp_pdev_mon_stats *rx_mon_sts)
{
	uint32_t pkt_t;

	for (pkt_t = CDP_PKT_TYPE_OFDM; pkt_t < CDP_PKT_TYPE_MAX; pkt_t++)
		dp_print_pdev_mpdu_rx_type(rx_mon_sts, pkt_t);
}

static inline void
print_ppdu_eht_type_mode(
	struct cdp_pdev_mon_stats *rx_mon_stats,
	uint32_t ppdu_type_mode,
	uint32_t dl_ul)
{
	DP_PRINT_STATS("type_mode=%d, dl_ul=%d, cnt=%d",
		       ppdu_type_mode,
		       dl_ul,
		       rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul]);
}

static inline void
print_ppdu_eht_type_mode_dl_ul(
	struct cdp_pdev_mon_stats *rx_mon_stats,
	uint32_t ppdu_type_mode
)
{
	uint32_t dl_ul;

	for (dl_ul = 0; dl_ul < CDP_MU_TYPE_MAX; dl_ul++) {
		if (rx_mon_stats->ppdu_eht_type_mode[ppdu_type_mode][dl_ul])
			print_ppdu_eht_type_mode(rx_mon_stats,
						 ppdu_type_mode, dl_ul);
	}
}

static inline void
dp_print_pdev_eht_ppdu_cnt(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint32_t ppdu_type_mode;

	rx_mon_stats = &mon_pdev->rx_mon_stats;
	DP_PRINT_STATS("Monitor EHT PPDU Count");
	for (ppdu_type_mode = 0; ppdu_type_mode < CDP_EHT_TYPE_MODE_MAX;
	     ppdu_type_mode++) {
		print_ppdu_eht_type_mode_dl_ul(rx_mon_stats,
					       ppdu_type_mode);
	}
}

static inline void
dp_print_pdev_mpdu_stats(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	rx_mon_stats = &mon_pdev->rx_mon_stats;
	DP_PRINT_STATS("Monitor MPDU Count");
	dp_print_pdev_mpdu_pkt_type(rx_mon_stats);
}

void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	uint32_t *stat_ring_ppdu_ids;
	uint32_t *dest_ring_ppdu_ids;
	int i, idx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	rx_mon_stats = &mon_pdev->rx_mon_stats;

	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");

	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
		       rx_mon_stats->status_ppdu_compl);
	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
		       rx_mon_stats->status_ppdu_start);
	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
		       rx_mon_stats->status_ppdu_end);
	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_start_mis);
	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_end_mis);

	DP_PRINT_STATS("start_user_info_cnt = %d",
		       rx_mon_stats->start_user_info_cnt);
	DP_PRINT_STATS("end_user_stats_cnt = %d",
		       rx_mon_stats->end_user_stats_cnt);

	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
		       rx_mon_stats->status_ppdu_done);
	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
		       rx_mon_stats->dest_ppdu_done);
	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
		       rx_mon_stats->dest_mpdu_done);
	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
		       rx_mon_stats->tlv_tag_status_err);
	DP_PRINT_STATS("mon status DMA not done WAR count= %u",
		       rx_mon_stats->status_buf_done_war);
	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
		       rx_mon_stats->dest_mpdu_drop);
	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
		       rx_mon_stats->dup_mon_linkdesc_cnt);
	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
		       rx_mon_stats->dup_mon_buf_cnt);
	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
		       rx_mon_stats->mon_rx_bufs_reaped_dest);
	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
		       rx_mon_stats->mon_rx_bufs_replenished_dest);
	DP_PRINT_STATS("ppdu_id_mismatch = %u",
		       rx_mon_stats->ppdu_id_mismatch);
	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
		       rx_mon_stats->ppdu_id_match);
	DP_PRINT_STATS("ppdus dropped frm status ring = %d",
		       rx_mon_stats->status_ppdu_drop);
	DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
		       rx_mon_stats->dest_ppdu_drop);
	DP_PRINT_STATS("mpdu_ppdu_id_mismatch_drop = %u",
		       rx_mon_stats->mpdu_ppdu_id_mismatch_drop);
	DP_PRINT_STATS("mpdu_decap_type_invalid = %u",
		       rx_mon_stats->mpdu_decap_type_invalid);
	DP_PRINT_STATS("pending_desc_count = %u",
		       rx_mon_stats->pending_desc_count);
	stat_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	dest_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);

	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) {
		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");
		/* qdf_mem_free() is NULL-safe, so free both buffers and
		 * return instead of dereferencing a failed allocation below.
		 */
		qdf_mem_free(stat_ring_ppdu_ids);
		qdf_mem_free(dest_ring_ppdu_ids);
		return;
	}

	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	idx = rx_mon_stats->ppdu_id_hist_idx;
	qdf_mem_copy(stat_ring_ppdu_ids,
		     rx_mon_stats->stat_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_mem_copy(dest_ring_ppdu_ids,
		     rx_mon_stats->dest_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	DP_PRINT_STATS("PPDU Id history:");
	DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
	for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
		idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
		/* print from the snapshots taken under mon_lock above */
		DP_PRINT_STATS("%*u\t%*u", 16,
			       stat_ring_ppdu_ids[idx], 16,
			       dest_ring_ppdu_ids[idx]);
	}
	qdf_mem_free(stat_ring_ppdu_ids);
	qdf_mem_free(dest_ring_ppdu_ids);
	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
		       rx_mon_stats->mon_rx_dest_stuck);

	dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats);
	dp_mon_rx_print_advanced_stats(pdev->soc, pdev);

	dp_print_pdev_mpdu_stats(pdev);
	dp_print_pdev_eht_ppdu_cnt(pdev);
}

#ifdef QCA_SUPPORT_BPR
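/**
 * dp_set_bpr_enable() - Enable/disable the BPR feature via monitor ops
 * @pdev: Datapath pdev handle
 * @val: Enable/disable value
 *
 * Return: QDF_STATUS
 */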
QDF_STATUS
dp_set_bpr_enable(struct dp_pdev *pdev, int val)
{
	struct dp_mon_ops *mon_ops;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (mon_ops && mon_ops->mon_set_bpr_enable)
		return mon_ops->mon_set_bpr_enable(pdev, val);

	return QDF_STATUS_E_FAILURE;
}
#endif

#ifdef WDI_EVENT_ENABLE
#ifdef BE_PKTLOG_SUPPORT
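/**
 * dp_set_hybrid_pktlog_enable() - Enable hybrid (Tx) pktlog mode
 * @pdev: Datapath pdev handle
 * @mon_pdev: Datapath monitor pdev handle
 * @soc: Datapath soc handle
 *
 * Return: true on success, false on failure
 */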
static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	struct dp_mon_ops *mon_ops = NULL;
	uint16_t num_buffers;

	/* Nothing needs to be done if monitor mode is
	 * enabled
	 */
	if (mon_pdev->mvdev)
		return false;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_filter_err("Mon ops uninitialized");
		return false;
	}

	if (!mon_pdev->pktlog_hybrid_mode) {
		mon_pdev->pktlog_hybrid_mode = true;
		soc_cfg_ctx = soc->wlan_cfg_ctx;
		num_buffers =
			wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);

		if (mon_ops && mon_ops->set_mon_mode_buf_rings_tx)
			mon_ops->set_mon_mode_buf_rings_tx(pdev, num_buffers);

		dp_mon_filter_setup_pktlog_hybrid(pdev);
		if (dp_tx_mon_filter_update(pdev) !=
		    QDF_STATUS_SUCCESS) {
			dp_cdp_err("Set hybrid filters failed");
			dp_mon_filter_reset_pktlog_hybrid(pdev);
			mon_pdev->rx_pktlog_mode =
				DP_RX_PKTLOG_DISABLED;
			return false;
		}

		dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_PKTLOG);
	}

	return true;
}

static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
	mon_pdev->pktlog_hybrid_mode = false;
}
#else
static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
}

static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_soc *soc)
{
	dp_cdp_err("Hybrid mode is supported only on beryllium");
	return true;
}
#endif
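/**
 * dp_set_pktlog_wifi3() - Attach/detach a pktlog config based on the
 *                         WDI event type
 * @pdev: Datapath pdev handle
 * @event: WDI event type
 * @enable: true - attach, false - detach
 *
 * Return: 0 in all cases; failures are logged internally
 */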
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	int max_mac_rings = wlan_cfg_get_num_mac_rings
			(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;
	struct dp_mon_ops *mon_ops;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	soc = pdev->soc;
	mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops)
		return 0;

	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			/* Nothing needs to be done if monitor mode is
			 * enabled
			 */
			if (mon_pdev->mvdev)
				return 0;

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
			dp_mon_filter_setup_rx_pkt_log_full(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog full filters set failed",
					   soc);
				dp_mon_filter_reset_rx_pkt_log_full(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;

		case WDI_EVENT_LITE_RX:
			/* Nothing needs to be done if monitor mode is
			 * enabled
			 */
			if (mon_pdev->mvdev)
				return 0;

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

			/*
			 * Set the packet log lite mode filter.
			 */
			dp_mon_filter_setup_rx_pkt_log_lite(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog lite filters set failed",
					   soc);
				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;
		case WDI_EVENT_LITE_T2H:
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				mon_pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		case WDI_EVENT_RX_CBF:
			/* Nothing needs to be done if monitor mode is
			 * enabled
			 */
			if (mon_pdev->mvdev)
				return 0;

			if (mon_pdev->rx_pktlog_cbf)
				break;

			mon_pdev->rx_pktlog_cbf = true;
			mon_pdev->monitor_configured = true;
			if (mon_ops->mon_vdev_set_monitor_mode_buf_rings)
				mon_ops->mon_vdev_set_monitor_mode_buf_rings(
					pdev);

			/*
			 * Set the packet log CBF mode filter.
			 */
			qdf_info("Non mon mode: Enable destination ring");

			dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_mon_err("Pktlog set CBF filters failed");
				dp_mon_filter_reset_rx_pktlog_cbf(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				mon_pdev->monitor_configured = false;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;
		case WDI_EVENT_HYBRID_TX:
			if (!dp_set_hybrid_pktlog_enable(pdev, mon_pdev, soc))
				return 0;
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			/* Nothing needs to be done if monitor mode is
			 * enabled
			 */
			if (mon_pdev->mvdev)
				return 0;

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_DISABLED)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
			dp_mon_filter_reset_rx_pkt_log_full(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog filters reset failed",
					   soc);
				return 0;
			}

			dp_mon_filter_reset_rx_pkt_log_lite(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog filters reset failed",
					   soc);
				return 0;
			}

			dp_monitor_reap_timer_stop(soc,
						   CDP_MON_REAP_SOURCE_PKTLOG);
			break;
		case WDI_EVENT_LITE_T2H:
			/*
			 * Pass value 0 to disable
			 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW. Once
			 * proper macros are defined in the htt header file,
			 * they will be used here instead.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				mon_pdev->pktlog_ppdu_stats = false;
				if (!mon_pdev->enhanced_stats_en &&
				    !mon_pdev->tx_sniffer_enable &&
				    !mon_pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (mon_pdev->tx_sniffer_enable ||
					   mon_pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (mon_pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}

			break;
		case WDI_EVENT_RX_CBF:
			mon_pdev->rx_pktlog_cbf = false;
			break;

		case WDI_EVENT_HYBRID_TX:
			dp_set_hybrid_pktlog_disable(mon_pdev);
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
#endif

/* MCL specific functions */
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
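/**
 * dp_pktlogmod_exit() - Stop the pktlog reap timer and unload pktlog
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */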
void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct hif_opaque_softc *scn = soc->hif_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!scn) {
		dp_mon_err("Invalid hif(scn) handle");
		return;
	}

	dp_monitor_reap_timer_stop(soc, CDP_MON_REAP_SOURCE_PKTLOG);
	pktlogmod_exit(scn);
	mon_pdev->pkt_log_init = false;
}
#endif /*DP_CON_MON*/

#if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT)
#ifdef IPA_OFFLOAD
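/**
 * dp_peer_get_tx_rx_stats() - Accumulate the peer's Tx/Rx packet and byte
 *                             counts into the interface peer stats
 * @peer: Datapath peer handle
 * @peer_stats_intf: Interface peer stats structure to fill
 *
 * Return: void
 */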
static void
dp_peer_get_tx_rx_stats(struct dp_peer *peer,
			struct cdp_interface_peer_stats *peer_stats_intf)
{
	struct dp_rx_tid *rx_tid = NULL;
	uint8_t i = 0;

	for (i = 0; i < DP_MAX_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		peer_stats_intf->rx_byte_count +=
			rx_tid->rx_msdu_cnt.bytes;
		peer_stats_intf->rx_packet_count +=
			rx_tid->rx_msdu_cnt.num;
	}
	peer_stats_intf->tx_packet_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.num;
	peer_stats_intf->tx_byte_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.bytes;
}
#else
static void
dp_peer_get_tx_rx_stats(struct dp_peer *peer,
			struct cdp_interface_peer_stats *peer_stats_intf)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *tgt_peer = NULL;
	uint8_t inx = 0;
	uint8_t stats_arr_size;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	txrx_peer = tgt_peer->txrx_peer;
	peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num;
	peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes;
	stats_arr_size = txrx_peer->stats_arr_size;

	for (inx = 0; inx < stats_arr_size; inx++) {
		peer_stats_intf->tx_packet_count +=
			txrx_peer->stats[inx].per_pkt_stats.tx.ucast.num;
		peer_stats_intf->tx_byte_count +=
			txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes;
	}
}
#endif

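/**
 * dp_peer_stats_notify() - Raise a WDI_EVENT_PEER_STATS event when a peer's
 *                          SNR or Tx rate has changed
 * @dp_pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 *
 * Return: QDF_STATUS
 */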
QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
{
	struct cdp_interface_peer_stats peer_stats_intf = {0};
	struct dp_mon_peer_stats *mon_peer_stats = NULL;
	struct dp_peer *tgt_peer = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;

	if (qdf_unlikely(!peer || !peer->vdev || !peer->monitor_peer))
		return QDF_STATUS_E_FAULT;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (qdf_unlikely(!tgt_peer))
		return QDF_STATUS_E_FAULT;

	txrx_peer = tgt_peer->txrx_peer;
	if (qdf_unlikely(!txrx_peer))
		return QDF_STATUS_E_FAULT;

	mon_peer_stats = &peer->monitor_peer->stats;

	if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
		peer_stats_intf.rssi_changed = true;

	if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
	    (mon_peer_stats->tx.tx_rate &&
	     mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) {
		qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		peer_stats_intf.vdev_id = peer->vdev->vdev_id;
		peer_stats_intf.last_peer_tx_rate =
					mon_peer_stats->tx.last_tx_rate;
		peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
		peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
		peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
		dp_peer_get_tx_rx_stats(peer, &peer_stats_intf);
		peer_stats_intf.per = tgt_peer->stats.tx.last_per;
		peer_stats_intf.free_buff = INVALID_FREE_BUFF;
		dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
				     (void *)&peer_stats_intf, 0,
				     WDI_NO_VAL, dp_pdev->pdev_id);
	}

	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter() - Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * Return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
				    pdev->soc,
				    peer->neighbour_peers_macaddr.raw[0],
				    peer->neighbour_peers_macaddr.raw[1],
				    peer->neighbour_peers_macaddr.raw[2],
				    peer->neighbour_peers_macaddr.raw[3],
				    peer->neighbour_peers_macaddr.raw[4],
				    peer->neighbour_peers_macaddr.raw[5]);

			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

			return mon_pdev->mvdev;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

	return NULL;
}

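/**
 * dp_filter_neighbour_peer() - Deliver frames from configured NAC clients
 *                              to the monitor vdev
 * @pdev: Datapath pdev handle
 * @rx_pkt_hdr: Rx packet header
 *
 * Return: QDF_STATUS_SUCCESS if the frame matched a neighbour peer and was
 *         delivered, QDF_STATUS_E_FAILURE otherwise
 */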
QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
				    uint8_t *rx_pkt_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->filter_neighbour_peers) {
		/* Next Hop scenario not yet handled */
		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
		if (vdev) {
			dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
					  pdev->invalid_peer_head_msdu,
					  pdev->invalid_peer_tail_msdu);

			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}
#endif
1602
1603 /**
1604 * dp_update_mon_mac_filter() - Set/reset monitor mac filter
1605 * @soc_hdl: cdp soc handle
1606 * @vdev_id: id of virtual device object
1607 * @cmd: Add/Del command
1608 *
1609 * Return: 0 for success. nonzero for failure.
1610 */
dp_update_mon_mac_filter(struct cdp_soc_t * soc_hdl,uint8_t vdev_id,uint32_t cmd)1611 static QDF_STATUS dp_update_mon_mac_filter(struct cdp_soc_t *soc_hdl,
1612 uint8_t vdev_id, uint32_t cmd)
1613 {
1614 struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1615 struct dp_pdev *pdev;
1616 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
1617 DP_MOD_ID_CDP);
1618 struct dp_mon_pdev *mon_pdev;
1619 QDF_STATUS status = QDF_STATUS_E_FAILURE;
1620
1621 if (!vdev)
1622 return status;
1623
1624 pdev = vdev->pdev;
1625 if (!pdev) {
1626 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1627 return status;
1628 }
1629
1630 mon_pdev = pdev->monitor_pdev;
1631 if (cmd == DP_NAC_PARAM_ADD) {
1632 /* first neighbour added */
1633 dp_mon_filter_set_reset_mon_mac_filter(pdev, true);
1634 status = dp_mon_filter_update(pdev);
1635 if (status != QDF_STATUS_SUCCESS) {
1636 dp_cdp_err("%pK: Mon mac filter set failed", soc);
1637 dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
1638 }
1639 } else if (cmd == DP_NAC_PARAM_DEL) {
1640 /* last neighbour deleted */
1641 dp_mon_filter_set_reset_mon_mac_filter(pdev, false);
1642 status = dp_mon_filter_update(pdev);
1643 if (status != QDF_STATUS_SUCCESS)
1644 dp_cdp_err("%pK: Mon mac filter reset failed", soc);
1645 }
1646
1647 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
1648 return status;
1649 }
1650
1651
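/**
 * dp_enable_mon_reap_timer() - Start/stop the monitor reap timer for a source
 * @soc_hdl: Datapath soc handle
 * @source: Monitor reap timer trigger source
 * @enable: true - start the timer, false - stop it
 *
 * Return: outcome reported by the start/stop operation
 */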
bool
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl,
			 enum cdp_mon_reap_source source,
			 bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	if (enable)
		return dp_monitor_reap_timer_start(soc, source);
	else
		return dp_monitor_reap_timer_stop(soc, source);
}

#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!handle) {
		dp_mon_err("pdev handle is NULL");
		return;
	}

	mon_pdev = handle->monitor_pdev;

	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log already initialized", soc);
		return;
	}

	pktlog_sethandle(&mon_pdev->pl_dev, scn);
	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		mon_pdev->pkt_log_init = false;
	} else {
		mon_pdev->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev handle is NULL");
		return;
	}

	dp_pktlogmod_exit(pdev);
}

#else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
}

static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
}
#endif
#endif

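/**
 * dp_neighbour_peers_detach() - Free the neighbour peer list and destroy
 *                               its lock
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */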
void dp_neighbour_peers_detach(struct dp_pdev *pdev)
{
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
			   neighbour_peer_list_elem, temp_peer) {
		/* delete this peer from the list */
		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
			     peer, neighbour_peer_list_elem);
		qdf_mem_free(peer);
	}

	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
}

1764 #ifdef QCA_ENHANCED_STATS_SUPPORT
1765 /**
1766 * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
1767 * @pdev: Datapath pdev handle
1768 *
1769 * Return: void
1770 */
1771 static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
1772 {
1773 struct dp_soc *soc = pdev->soc;
1774 struct dp_mon_ops *mon_ops = NULL;
1775
1776 mon_ops = dp_mon_ops_get(soc);
1777 if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
1778 mon_ops->mon_tx_enable_enhanced_stats(pdev);
1779 }
1780
1781 /**
1782 * dp_enable_enhanced_stats() - API to enable enhanced statistics
1783 * @soc: DP_SOC handle
1784 * @pdev_id: id of DP_PDEV handle
1785 *
1786 * Return: QDF_STATUS
1787 */
1788 QDF_STATUS
1789 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
1790 {
1791 struct dp_pdev *pdev = NULL;
1792 QDF_STATUS status = QDF_STATUS_SUCCESS;
1793 struct dp_mon_pdev *mon_pdev;
1794 struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
1795
1796 pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1797 pdev_id);
1798
1799 if (!pdev)
1800 return QDF_STATUS_E_FAILURE;
1801
1802 mon_pdev = pdev->monitor_pdev;
1803
1804 if (!mon_pdev)
1805 return QDF_STATUS_E_FAILURE;
1806
1807 if (mon_pdev->enhanced_stats_en == 0)
1808 dp_cal_client_timer_start(mon_pdev->cal_client_ctx);
1809
1810 mon_pdev->enhanced_stats_en = 1;
1811 pdev->enhanced_stats_en = 1;
1812 pdev->link_peer_stats = wlan_cfg_is_peer_link_stats_enabled(
1813 dp_soc->wlan_cfg_ctx);
1814
1815 dp_mon_filter_setup_enhanced_stats(pdev);
1816 status = dp_mon_filter_update(pdev);
1817 if (status != QDF_STATUS_SUCCESS) {
1818 dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
1819 dp_mon_filter_reset_enhanced_stats(pdev);
1820 dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
1821 mon_pdev->enhanced_stats_en = 0;
1822 pdev->enhanced_stats_en = 0;
1823 pdev->link_peer_stats = 0;
1824 return QDF_STATUS_E_FAILURE;
1825 }
1826
1827 dp_mon_tx_enable_enhanced_stats(pdev);
1828
1829 /* reset the tx fast path flag, as enhanced stats are enabled */
1830 pdev->tx_fast_flag &= ~DP_TX_DESC_FLAG_SIMPLE;
1831 if (dp_soc->hw_txrx_stats_en)
1832 pdev->tx_fast_flag &= ~DP_TX_DESC_FLAG_FASTPATH_SIMPLE;
1833
1834 return QDF_STATUS_SUCCESS;
1835 }
1836
1837 /**
1838 * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
1839 * @pdev: Datapath pdev handle
1840 *
1841 * Return: void
1842 */
1843 static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
1844 {
1845 struct dp_soc *soc = pdev->soc;
1846 struct dp_mon_ops *mon_ops = NULL;
1847
1848 mon_ops = dp_mon_ops_get(soc);
1849 if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
1850 mon_ops->mon_tx_disable_enhanced_stats(pdev);
1851 }
1852
1853 /**
1854 * dp_disable_enhanced_stats() - API to disable enhanced statistics
1855 *
1856 * @soc: the soc handle
1857 * @pdev_id: pdev_id of pdev
1858 *
1859 * Return: QDF_STATUS
1860 */
1861 QDF_STATUS
1862 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
1863 {
1864 struct dp_pdev *pdev =
1865 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1866 pdev_id);
1867 struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc);
1868 struct dp_mon_pdev *mon_pdev;
1869
1870 if (!pdev || !pdev->monitor_pdev)
1871 return QDF_STATUS_E_FAILURE;
1872
1873 mon_pdev = pdev->monitor_pdev;
1874
1875 if (mon_pdev->enhanced_stats_en == 1)
1876 dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
1877
1878 mon_pdev->enhanced_stats_en = 0;
1879 pdev->enhanced_stats_en = 0;
1880 pdev->link_peer_stats = 0;
1881
1882 dp_mon_tx_disable_enhanced_stats(pdev);
1883
1884 dp_mon_filter_reset_enhanced_stats(pdev);
1885 if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
1886 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1887 FL("Failed to reset enhanced mode filters"));
1888 }
1889
1890 /* set the tx fast path flag, as enhanced stats are disabled */
1891 pdev->tx_fast_flag |= DP_TX_DESC_FLAG_SIMPLE;
1892 if (dp_soc->hw_txrx_stats_en)
1893 pdev->tx_fast_flag |= DP_TX_DESC_FLAG_FASTPATH_SIMPLE;
1894
1895 return QDF_STATUS_SUCCESS;
1896 }
1897
1898 #ifdef WDI_EVENT_ENABLE
1899 QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
1900 struct cdp_rx_stats_ppdu_user *ppdu_user)
1901 {
1902 struct cdp_interface_peer_qos_stats qos_stats_intf = {0};
1903
1904 if (qdf_unlikely(ppdu_user->peer_id == HTT_INVALID_PEER)) {
1905 dp_mon_warn("Invalid peer id: %u", ppdu_user->peer_id);
1906 return QDF_STATUS_E_FAILURE;
1907 }
1908
1909 qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
1910 QDF_MAC_ADDR_SIZE);
1911 qos_stats_intf.frame_control = ppdu_user->frame_control;
1912 qos_stats_intf.frame_control_info_valid =
1913 ppdu_user->frame_control_info_valid;
1914 qos_stats_intf.qos_control = ppdu_user->qos_control;
1915 qos_stats_intf.qos_control_info_valid =
1916 ppdu_user->qos_control_info_valid;
1917 qos_stats_intf.vdev_id = ppdu_user->vdev_id;
1918 dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
1919 (void *)&qos_stats_intf, 0,
1920 WDI_NO_VAL, dp_pdev->pdev_id);
1921
1922 return QDF_STATUS_SUCCESS;
1923 }
1924 #else
1925 static inline QDF_STATUS
1926 dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
1927 struct cdp_rx_stats_ppdu_user *ppdu_user)
1928 {
1929 return QDF_STATUS_SUCCESS;
1930 }
1931 #endif
1932 #endif /* QCA_ENHANCED_STATS_SUPPORT */
1933
1934 /**
1935 * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
1936 * for pktlog
1937 * @soc: cdp_soc handle
1938 * @pdev_id: id of dp pdev handle
1939 * @mac_addr: Peer mac address
1940 * @enb_dsb: Enable or disable peer based filtering
1941 *
1942 * Return: QDF_STATUS
1943 */
1944 static int
1945 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
1946 uint8_t *mac_addr, uint8_t enb_dsb)
1947 {
1948 struct dp_peer *peer;
1949 QDF_STATUS status = QDF_STATUS_E_FAILURE;
1950 struct dp_pdev *pdev =
1951 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
1952 pdev_id);
1953 struct dp_mon_pdev *mon_pdev;
1954
1955 if (!pdev)
1956 return QDF_STATUS_E_FAILURE;
1957
1958 mon_pdev = pdev->monitor_pdev;
1959
1960 peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
1961 0, DP_VDEV_ALL, DP_MOD_ID_CDP);
1962
1963 if (!peer) {
1964 dp_mon_err("Peer is NULL");
1965 return QDF_STATUS_E_FAILURE;
1966 }
1967
1968 if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) {
1969 peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
1970 mon_pdev->dp_peer_based_pktlog = enb_dsb;
1971 status = QDF_STATUS_SUCCESS;
1972 }
1973
1974 dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1975
1976 return status;
1977 }
1978
1979 /**
1980 * dp_peer_update_pkt_capture_params() - Set Rx & Tx Capture flags for a peer
1981 * @soc: DP_SOC handle
1982 * @pdev_id: id of DP_PDEV handle
1983 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
1984 * @is_tx_pkt_cap_enable: enable/disable/delete/print
1985 * Tx packet capture in monitor mode
1986 * @peer_mac: MAC address for which the above need to be enabled/disabled
1987 *
1988 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer,
 *	   an error status otherwise
1989 */
1990 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
1991 static QDF_STATUS
1992 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
1993 uint8_t pdev_id,
1994 bool is_rx_pkt_cap_enable,
1995 uint8_t is_tx_pkt_cap_enable,
1996 uint8_t *peer_mac)
1997 {
1998 struct dp_peer *peer;
1999 QDF_STATUS status = QDF_STATUS_E_FAILURE;
2000 struct dp_pdev *pdev =
2001 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
2002 pdev_id);
2003 if (!pdev)
2004 return QDF_STATUS_E_FAILURE;
2005
2006 peer = dp_peer_find_hash_find((struct dp_soc *)soc,
2007 peer_mac, 0, DP_VDEV_ALL,
2008 DP_MOD_ID_CDP);
2009 if (!peer)
2010 return QDF_STATUS_E_FAILURE;
2011
2012 /* Tx packet capture also needs to be set for non-associated peers */
2013 if (!IS_MLO_DP_MLD_PEER(peer)) {
2014 status = dp_monitor_tx_peer_filter(pdev, peer,
2015 is_tx_pkt_cap_enable,
2016 peer_mac);
2017
2018 status = dp_peer_set_rx_capture_enabled(pdev, peer,
2019 is_rx_pkt_cap_enable,
2020 peer_mac);
2021 }
2022
2023 dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
2024
2025 return status;
2026 }
2027 #endif
2028
2029 #ifdef QCA_MCOPY_SUPPORT
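/**
 * dp_mcopy_check_deliver() - check whether an mcopy frame should be delivered
 * @pdev: Datapath pdev handle
 * @peer_id: peer id of the PPDU
 * @ppdu_id: PPDU id
 * @first_msdu: set when this is the first MSDU of the PPDU
 *
 * In M_COPY mode a frame for an already-delivered (peer_id, ppdu_id) pair is
 * rejected, and in any mcopy mode only the first MSDU of a PPDU is delivered.
 *
 * Return: QDF_STATUS_SUCCESS if the frame may be delivered,
 * QDF_STATUS_E_INVAL otherwise
 */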
2030 QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
2031 uint16_t peer_id,
2032 uint32_t ppdu_id,
2033 uint8_t first_msdu)
2034 {
2035 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2036
2037 if (mon_pdev->mcopy_mode) {
2038 if (mon_pdev->mcopy_mode == M_COPY) {
2039 if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
2040 (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
2041 return QDF_STATUS_E_INVAL;
2042 }
2043 }
2044
2045 if (!first_msdu)
2046 return QDF_STATUS_E_INVAL;
2047
2048 mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
2049 mon_pdev->m_copy_id.tx_peer_id = peer_id;
2050 }
2051
2052 return QDF_STATUS_SUCCESS;
2053 }
2054 #endif
2055
2056 #ifdef WDI_EVENT_ENABLE
2057 #ifndef REMOVE_PKT_LOG
2058 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2059 {
2060 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2061 struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2062
2063 if (!pdev || !pdev->monitor_pdev)
2064 return NULL;
2065
2066 return pdev->monitor_pdev->pl_dev;
2067 }
2068 #else
2069 static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2070 {
2071 return NULL;
2072 }
2073 #endif
2074 #endif
2075
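/**
 * dp_rx_populate_cbf_hdr() - prepend a synthetic HTT PPDU stats header to a
 * CBF frame and deliver it through the WDI path
 * @soc: Datapath soc handle
 * @mac_id: lmac id
 * @event: WDI event to raise
 * @mpdu: network buffer carrying the frame
 * @msdu_timestamp: MSDU timestamp copied into the header
 *
 * The header and an RX_MGMTCTRL_PAYLOAD TLV are pushed onto the nbuf head,
 * the WDI handler is invoked, and the header is pulled off again so the
 * caller sees the original buffer.
 *
 * Return: QDF_STATUS
 */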
2076 QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
2077 uint32_t mac_id,
2078 uint32_t event,
2079 qdf_nbuf_t mpdu,
2080 uint32_t msdu_timestamp)
2081 {
2082 uint32_t data_size, hdr_size, ppdu_id, align4byte;
2083 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2084 uint32_t *msg_word;
2085
2086 if (!pdev)
2087 return QDF_STATUS_E_INVAL;
2088
2089 ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;
2090
2091 hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
2092 + qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);
2093
2094 data_size = qdf_nbuf_len(mpdu);
2095
2096 qdf_nbuf_push_head(mpdu, hdr_size);
2097
2098 msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
2099 /*
2100 * Populate the PPDU Stats Indication header
2101 */
2102 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
2103 HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
2104 HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
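/* ((x + 3) >> 2) << 2 rounds x up to the next multiple of 4 (e.g. 13 -> 16),
 * so the advertised payload size stays 4-byte aligned.
 */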
2105 align4byte = ((data_size +
2106 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
2107 + 3) >> 2) << 2;
2108 HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
2109 msg_word++;
2110 HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
2111 msg_word++;
2112
2113 *msg_word = msdu_timestamp;
2114 msg_word++;
2115 /* Skip reserved field */
2116 msg_word++;
2117 /*
2118 * Populate MGMT_CTRL Payload TLV first
2119 */
2120 HTT_STATS_TLV_TAG_SET(*msg_word,
2121 HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);
2122
2123 align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
2124 qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
2125 + 3) >> 2) << 2;
2126 HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
2127 msg_word++;
2128
2129 HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
2130 *msg_word, data_size);
2131 msg_word++;
2132
2133 dp_wdi_event_handler(event, soc, (void *)mpdu,
2134 HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
2135
2136 qdf_nbuf_pull_head(mpdu, hdr_size);
2137
2138 return QDF_STATUS_SUCCESS;
2139 }
2140
2141 #ifdef ATH_SUPPORT_EXT_STAT
2142 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
2143 /**
2144 * dp_pdev_clear_link_airtime_stats() - clear airtime stats for given pdev
2145 * @pdev: DP PDEV handle
2146 */
2147 static inline
2148 void dp_pdev_clear_link_airtime_stats(struct dp_pdev *pdev)
2149 {
2150 uint8_t ac;
2151
2152 for (ac = 0; ac < WME_AC_MAX; ac++)
2153 pdev->stats.telemetry_stats.link_airtime[ac] = 0;
2154 }
2155
2156 /**
2157 * dp_peer_update_telemetry_stats() - update peer telemetry stats
2158 * @soc: Datapath soc
2159 * @peer: Datapath peer
2160 * @arg: argument to callback function
2161 */
2162 static inline
2163 void dp_peer_update_telemetry_stats(struct dp_soc *soc,
2164 struct dp_peer *peer,
2165 void *arg)
2166 {
2167 struct dp_pdev *pdev;
2168 struct dp_vdev *vdev;
2169 struct dp_mon_peer *mon_peer = NULL;
2170 uint8_t ac;
2171 uint64_t current_time = qdf_get_log_timestamp();
2172 uint32_t remn, time_diff, usage;
2173 uint16_t usage_per_sec;
2174 struct dp_mon_peer_airtime_stats *stat_airtime;
2175 struct dp_mon_peer_airtime_consumption *consump;
2176
2177 vdev = peer->vdev;
2178 if (!vdev)
2179 return;
2180
2181 pdev = vdev->pdev;
2182 if (!pdev)
2183 return;
2184
2185 mon_peer = peer->monitor_peer;
2186 if (qdf_likely(mon_peer)) {
2187 stat_airtime = &mon_peer->stats.airtime_stats;
2188 time_diff = (uint32_t)(current_time -
2189 stat_airtime->last_update_time);
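/* For each access category, convert the accumulated airtime into an
 * average percentage of the elapsed interval: quotient = usage * 100 /
 * time_diff, rounded half-up (capped at 100) and bumped to at least 1
 * whenever any airtime was consumed.
 */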
2190 for (ac = 0; ac < WME_AC_MAX; ac++) {
2191 consump = &stat_airtime->tx_airtime_consumption[ac];
2192 usage = consump->consumption;
2193 usage_per_sec = (uint8_t)qdf_do_div((uint64_t)
2194 (usage * 100), time_diff);
2195 remn = qdf_do_div_rem((uint64_t)
2196 (usage * 100), time_diff);
2197 if (remn < time_diff / 2) {
2198 if (remn && usage_per_sec == 0)
2199 usage_per_sec++;
2200 } else {
2201 if (usage_per_sec < 100)
2202 usage_per_sec++;
2203 }
2204 consump->avg_consumption_per_sec = usage_per_sec;
2205 /* Store each peer airtime consumption in pdev
2206 * link_airtime to calculate pdev's total airtime
2207 * consumption
2208 */
2209 DP_STATS_INC(pdev,
2210 telemetry_stats.link_airtime[ac],
2211 consump->consumption);
2212 consump->consumption = 0;
2213
2214 consump = &stat_airtime->rx_airtime_consumption[ac];
2215 usage = consump->consumption;
2216 usage_per_sec = (uint8_t)qdf_do_div((uint64_t)
2217 (usage * 100), time_diff);
2218 remn = qdf_do_div_rem((uint64_t)
2219 (usage * 100), time_diff);
2220 if (remn < time_diff / 2) {
2221 if (remn && usage_per_sec == 0)
2222 usage_per_sec++;
2223 } else {
2224 if (usage_per_sec < 100)
2225 usage_per_sec++;
2226 }
2227 consump->avg_consumption_per_sec = usage_per_sec;
2228 /* Store each peer airtime consumption in pdev
2229 * link_airtime to calculate pdev's total airtime
2230 * consumption
2231 */
2232 DP_STATS_INC(pdev,
2233 telemetry_stats.link_airtime[ac],
2234 consump->consumption);
2235 consump->consumption = 0;
2236 }
2237 stat_airtime->last_update_time = current_time;
2238 }
2239 }
2240
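/**
 * dp_pdev_update_telemetry_airtime_stats() - recompute pdev link airtime
 * from the airtime consumed by all of its peers
 * @soc: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: QDF_STATUS
 */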
2241 QDF_STATUS dp_pdev_update_telemetry_airtime_stats(struct cdp_soc_t *soc,
2242 uint8_t pdev_id)
2243 {
2244 struct dp_pdev *pdev =
2245 dp_get_pdev_from_soc_pdev_id_wifi3(cdp_soc_t_to_dp_soc(soc),
2246 pdev_id);
2247 if (!pdev)
2248 return QDF_STATUS_E_FAILURE;
2249
2250 /* Clear the current airtime stats, as the API below increments the
2251 * stats for all peers on top of the current value.
2252 */
2253 dp_pdev_clear_link_airtime_stats(pdev);
2254 dp_pdev_iterate_peer(pdev, dp_peer_update_telemetry_stats, NULL,
2255 DP_MOD_ID_CDP);
2256
2257 return QDF_STATUS_SUCCESS;
2258 }
2259 #endif
2260
2261 /**
2262 * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
2263 * @soc: Datapath SOC
2264 * @peer: Datapath peer
2265 * @arg: argument to iter function
2266 */
2267 #ifdef IPA_OFFLOAD
2268 static void
2269 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
2270 struct dp_peer *peer,
2271 void *arg)
2272 {
2273 struct cdp_calibr_stats_intf peer_stats_intf = {0};
2274 struct dp_peer *tgt_peer = NULL;
2275 struct dp_txrx_peer *txrx_peer = NULL;
2276
2277 if (!dp_peer_is_primary_link_peer(peer))
2278 return;
2279
2280 tgt_peer = dp_get_tgt_peer_from_peer(peer);
2281 if (!tgt_peer || !(tgt_peer->txrx_peer))
2282 return;
2283
2284 txrx_peer = tgt_peer->txrx_peer;
2285 peer_stats_intf.to_stack = txrx_peer->to_stack;
2286 peer_stats_intf.tx_success =
2287 peer->monitor_peer->stats.tx.tx_ucast_success;
2288 peer_stats_intf.tx_ucast =
2289 peer->monitor_peer->stats.tx.tx_ucast_total;
2290
2291 dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
2292 &tgt_peer->stats);
2293 dp_peer_get_rxtid_stats_ipa(peer, dp_peer_update_tid_stats_from_reo);
2294 }
2295 #else
2296 static void
2297 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
2298 struct dp_peer *peer,
2299 void *arg)
2300 {
2301 struct cdp_calibr_stats_intf peer_stats_intf = {0};
2302 struct dp_peer *tgt_peer = NULL;
2303 struct dp_txrx_peer *txrx_peer = NULL;
2304 uint8_t inx = 0;
2305 uint8_t stats_arr_size;
2306
2307 if (!dp_peer_is_primary_link_peer(peer))
2308 return;
2309
2310 tgt_peer = dp_get_tgt_peer_from_peer(peer);
2311 if (!tgt_peer || !(tgt_peer->txrx_peer))
2312 return;
2313
2314 txrx_peer = tgt_peer->txrx_peer;
2315 peer_stats_intf.to_stack = txrx_peer->to_stack;
2316 stats_arr_size = txrx_peer->stats_arr_size;
2317
2318 for (inx = 0; inx < stats_arr_size; inx++) {
2319 peer_stats_intf.tx_success.num +=
2320 txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.num;
2321 peer_stats_intf.tx_success.bytes +=
2322 txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes;
2323 peer_stats_intf.tx_ucast.num +=
2324 txrx_peer->stats[inx].per_pkt_stats.tx.ucast.num;
2325 peer_stats_intf.tx_ucast.bytes +=
2326 txrx_peer->stats[inx].per_pkt_stats.tx.ucast.bytes;
2327 }
2328
2329 dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
2330 &tgt_peer->stats);
2331 }
2332 #endif
2333
2334 /**
2335 * dp_iterate_update_peer_list() - update peer stats on cal client timer
2336 * @pdev_hdl: pdev handle
2337 */
2338 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
2339 {
2340 struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
2341
2342 dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
2343 DP_MOD_ID_CDP);
2344 }
2345 #else
2346 static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
2347 {
2348 }
2349 #endif
2350
2351 #ifdef ATH_SUPPORT_NAC
2352 int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
2353 bool val)
2354 {
2355 /* Enable/Disable smart mesh filtering. This flag will be checked
2356 * during rx processing to check if packets are from NAC clients.
2357 */
2358 pdev->monitor_pdev->filter_neighbour_peers = val;
2359 return 0;
2360 }
2361 #endif /* ATH_SUPPORT_NAC */
2362
2363 #ifdef WLAN_ATF_ENABLE
2364 void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
2365 {
2366 if (!pdev) {
2367 dp_cdp_err("pdev is NULL");
2368 return;
2369 }
2370
2371 pdev->monitor_pdev->dp_atf_stats_enable = value;
2372 }
2373 #endif
2374
2375 #ifdef QCA_ENHANCED_STATS_SUPPORT
2376 /**
2377 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv() - Process
2378 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2379 * @pdev: DP PDEV handle
2380 * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2381 * @ppdu_id: PPDU Id
2382 *
2383 * Return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller
2384 */
2385 static QDF_STATUS
2386 dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2387 qdf_nbuf_t tag_buf,
2388 uint32_t ppdu_id)
2389 {
2390 uint32_t *nbuf_ptr;
2391 uint8_t trim_size;
2392 size_t head_size;
2393 struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
2394 uint32_t *msg_word;
2395 uint32_t tsf_hdr;
2396 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2397
2398 if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
2399 (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
2400 return QDF_STATUS_SUCCESS;
2401
2402 /*
2403 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
2404 */
2405 msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
2406 msg_word = msg_word + 2;
2407 tsf_hdr = *msg_word;
2408
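/* Drop everything in front of the management frame payload: trim_size is
 * the distance from the current nbuf data pointer to the payload start
 * (mgmt_buf plus the reserved header length).
 */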
2409 trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
2410 HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2411 qdf_nbuf_data(tag_buf));
2412
2413 if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2414 return QDF_STATUS_SUCCESS;
2415
2416 qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2417 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);
2418
2419 if (mon_pdev->tx_capture_enabled) {
2420 head_size = sizeof(struct cdp_tx_mgmt_comp_info);
2421 if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
2422 qdf_err("Fail to get headroom h_sz %zu h_avail %d\n",
2423 head_size, qdf_nbuf_headroom(tag_buf));
2424 qdf_assert_always(0);
2425 return QDF_STATUS_E_NOMEM;
2426 }
2427 ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
2428 qdf_nbuf_push_head(tag_buf, head_size);
2429 qdf_assert_always(ptr_mgmt_comp_info);
2430 ptr_mgmt_comp_info->ppdu_id = ppdu_id;
2431 ptr_mgmt_comp_info->is_sgen_pkt = true;
2432 ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
2433 } else {
2434 head_size = sizeof(ppdu_id);
2435 nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
2436 *nbuf_ptr = ppdu_id;
2437 }
2438 if (mon_pdev->bpr_enable) {
2439 dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2440 tag_buf, HTT_INVALID_PEER,
2441 WDI_NO_VAL, pdev->pdev_id);
2442 }
2443
2444 dp_deliver_mgmt_frm(pdev, tag_buf);
2445
2446 return QDF_STATUS_E_ALREADY;
2447 }
2448
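/**
 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - validate the sniffer AMPDU
 * TLV bitmap
 * @bitmap: TLV bitmap received from the target
 *
 * Return: the bitmap if it matches the 64- or 256-window AMPDU TLV bitmap,
 * 0 otherwise
 */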
2449 int
2450 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
2451 {
2452 if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
2453 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
2454 else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
2455 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
2456
2457 return 0;
2458 }
2459
2460 /**
2461 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
2462 * @peer: Datapath peer handle
2463 * @ppdu: User PPDU Descriptor
2464 * @cur_ppdu_id: PPDU_ID
2465 *
2466 * Return: None
2467 *
2468 * On a Tx data frame we may get the delayed-BA flag set
2469 * in htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
2470 * after we send a Block Ack Request (BAR). Successful MSDUs are reported
2471 * only after the Block Ack, and peer stats need successful MSDUs (data
2472 * frames), so we hold the Tx data stats in delayed_ba for a later update.
2473 */
2474 static void
2475 dp_peer_copy_delay_stats(struct dp_peer *peer,
2476 struct cdp_tx_completion_ppdu_user *ppdu,
2477 uint32_t cur_ppdu_id)
2478 {
2479 struct dp_pdev *pdev;
2480 struct dp_vdev *vdev;
2481 struct dp_mon_peer *mon_peer = peer->monitor_peer;
2482
2483 if (mon_peer->last_delayed_ba) {
2484 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2485 "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
2486 mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
2487 vdev = peer->vdev;
2488 if (vdev) {
2489 pdev = vdev->pdev;
2490 pdev->stats.cdp_delayed_ba_not_recev++;
2491 }
2492 }
2493
2494 mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
2495 mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
2496 mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
2497 mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
2498 mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
2499 mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
2500 mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
2501 mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
2502 mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
2504 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
2505 ppdu->mpdu_tried_ucast;
2506 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
2507 ppdu->mpdu_tried_mcast;
2508 mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
2509 mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
2511
2512 mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
2513 mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
2514 mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
2515
2516 mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
2517 mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
2518
2519 mon_peer->last_delayed_ba = true;
2520
2521 ppdu->debug_copied = true;
2522 }
2523
2524 /**
2525 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
2526 * @peer: Datapath peer handle
2527 * @ppdu: PPDU Descriptor
2528 *
2529 * Return: None
2530 *
2531 * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info
2532 * from Tx BAR frame not required to populate peer stats.
2533 * But we need successful MPDU and MSDU to update previous
2534 * transmitted Tx data frame. Overwrite ppdu stats with the previous
2535 * stored ppdu stats.
2536 */
2537 static void
2538 dp_peer_copy_stats_to_bar(struct dp_peer *peer,
2539 struct cdp_tx_completion_ppdu_user *ppdu)
2540 {
2541 struct dp_mon_peer *mon_peer = peer->monitor_peer;
2542
2543 ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
2544 ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
2545 ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
2546 ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
2547 ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
2548 ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
2549 ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
2550 ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
2551 ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
2553 ppdu->mpdu_tried_ucast =
2554 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
2555 ppdu->mpdu_tried_mcast =
2556 mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
2557 ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
2558 ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
2560
2561 ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
2562 ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
2563 ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
2564
2565 ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
2566 ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;
2567
2568 mon_peer->last_delayed_ba = false;
2569
2570 ppdu->debug_copied = true;
2571 }
2572
2573 /**
2574 * dp_tx_rate_stats_update() - Update rate per-peer statistics
2575 * @peer: Datapath peer handle
2576 * @ppdu: PPDU Descriptor
2577 *
2578 * Return: None
2579 */
2580 static void
2581 dp_tx_rate_stats_update(struct dp_peer *peer,
2582 struct cdp_tx_completion_ppdu_user *ppdu)
2583 {
2584 uint32_t ratekbps = 0;
2585 uint64_t ppdu_tx_rate = 0;
2586 uint32_t rix;
2587 uint16_t ratecode = 0;
2588 struct dp_mon_peer *mon_peer = NULL;
2589
2590 if (!peer || !ppdu)
2591 return;
2592
2593 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
2594 return;
2595
2596 mon_peer = peer->monitor_peer;
2597 if (!mon_peer)
2598 return;
2599
2600 ratekbps = dp_getrateindex(ppdu->gi,
2601 ppdu->mcs,
2602 ppdu->nss,
2603 ppdu->preamble,
2604 ppdu->bw,
2605 ppdu->punc_mode,
2606 &rix,
2607 &ratecode);
2608
2609 if (!ratekbps)
2610 return;
2611
2612 /* Calculate goodput in the non-training period.
2613 * In the training period, do nothing, as the
2614 * pending packet is sent as goodput.
2615 */
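/* Worked example (assuming CDP_NUM_KB_IN_MB == 1000 and
 * CDP_PERCENT_MACRO == 100): ratekbps = 144000 and current_rate_per = 10
 * give sa_goodput = (144000 / 1000) * (100 - 10) = 12960.
 */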
2616 if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
2617 ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
2618 (CDP_PERCENT_MACRO - ppdu->current_rate_per));
2619 }
2620 ppdu->rix = rix;
2621 ppdu->tx_ratekbps = ratekbps;
2622 ppdu->tx_ratecode = ratecode;
2623 DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps);
2624 mon_peer->stats.tx.avg_tx_rate =
2625 dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps);
2626 ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate);
2627 DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
2628
2629 mon_peer->stats.tx.bw_info = ppdu->bw;
2630 mon_peer->stats.tx.gi_info = ppdu->gi;
2631 mon_peer->stats.tx.nss_info = ppdu->nss;
2632 mon_peer->stats.tx.mcs_info = ppdu->mcs;
2633 mon_peer->stats.tx.preamble_info = ppdu->preamble;
2634 if (peer->vdev) {
2635 /*
2636 * In STA mode:
2637 * We get ucast stats as BSS peer stats.
2638 *
2639 * In AP mode:
2640 * We get mcast stats as BSS peer stats.
2641 * We get ucast stats as assoc peer stats.
2642 */
2643 if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
2644 peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
2645 peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
2646 } else {
2647 peer->vdev->stats.tx.last_tx_rate = ratekbps;
2648 peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
2649 }
2650 }
2651 }
2652
2653 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
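/**
 * dp_send_stats_event() - raise a WDI_EVENT_UPDATE_DP_STATS event for a peer
 * @pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 * @peer_id: peer id
 *
 * Publishes the peer's average SNR and Tx/Rx byte counts through the WDI
 * event handler.
 *
 * Return: none
 */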
2654 void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
2655 uint16_t peer_id)
2656 {
2657 struct cdp_interface_peer_stats peer_stats_intf = {0};
2658 struct dp_mon_peer *mon_peer = peer->monitor_peer;
2659 struct dp_txrx_peer *txrx_peer = NULL;
2660 uint8_t inx = 0;
2661 uint8_t stats_arr_size;
2662
2663 if (qdf_unlikely(!mon_peer))
2664 return;
2665
2666 mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
2667 peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;
2668
2669 txrx_peer = dp_get_txrx_peer(peer);
2670 if (qdf_likely(txrx_peer)) {
2671 stats_arr_size = txrx_peer->stats_arr_size;
2672 peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
2673 for (inx = 0; inx < stats_arr_size; inx++)
2674 peer_stats_intf.tx_byte_count +=
2675 txrx_peer->stats[inx].per_pkt_stats.tx.tx_success.bytes;
2676 }
2677
2678 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
2679 &peer_stats_intf, peer_id,
2680 UPDATE_PEER_STATS, pdev->pdev_id);
2681 }
2682 #endif
2683
2684 #ifdef WLAN_FEATURE_11BE
2685 /**
2686 * dp_get_ru_index_frm_ru_tones() - get ru index
2687 * @ru_tones: ru tones
2688 *
2689 * Return: ru index
2690 */
2691 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
2692 {
2693 enum cdp_ru_index ru_index;
2694
2695 switch (ru_tones) {
2696 case RU_26:
2697 ru_index = RU_26_INDEX;
2698 break;
2699 case RU_52:
2700 ru_index = RU_52_INDEX;
2701 break;
2702 case RU_52_26:
2703 ru_index = RU_52_26_INDEX;
2704 break;
2705 case RU_106:
2706 ru_index = RU_106_INDEX;
2707 break;
2708 case RU_106_26:
2709 ru_index = RU_106_26_INDEX;
2710 break;
2711 case RU_242:
2712 ru_index = RU_242_INDEX;
2713 break;
2714 case RU_484:
2715 ru_index = RU_484_INDEX;
2716 break;
2717 case RU_484_242:
2718 ru_index = RU_484_242_INDEX;
2719 break;
2720 case RU_996:
2721 ru_index = RU_996_INDEX;
2722 break;
2723 case RU_996_484:
2724 ru_index = RU_996_484_INDEX;
2725 break;
2726 case RU_996_484_242:
2727 ru_index = RU_996_484_242_INDEX;
2728 break;
2729 case RU_2X996:
2730 ru_index = RU_2X996_INDEX;
2731 break;
2732 case RU_2X996_484:
2733 ru_index = RU_2X996_484_INDEX;
2734 break;
2735 case RU_3X996:
2736 ru_index = RU_3X996_INDEX;
2737 break;
2738 case RU_3X996_484:
2739 ru_index = RU_3X996_484_INDEX;
2740 break;
2741 case RU_4X996:
2742 ru_index = RU_4X996_INDEX;
2743 break;
2744 default:
2745 ru_index = RU_INDEX_MAX;
2746 break;
2747 }
2748
2749 return ru_index;
2750 }
2751
2752 /**
2753 * dp_mon_get_ru_width_from_ru_size() - get ru_width from ru_size enum
2754 * @ru_size: HTT ru_size enum
2755 *
2756 * Return: ru_width of uint32_t type
2757 */
2758 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
2759 {
2760 uint32_t width = 0;
2761
2762 switch (ru_size) {
2763 case HTT_PPDU_STATS_RU_26:
2764 width = RU_26;
2765 break;
2766 case HTT_PPDU_STATS_RU_52:
2767 width = RU_52;
2768 break;
2769 case HTT_PPDU_STATS_RU_52_26:
2770 width = RU_52_26;
2771 break;
2772 case HTT_PPDU_STATS_RU_106:
2773 width = RU_106;
2774 break;
2775 case HTT_PPDU_STATS_RU_106_26:
2776 width = RU_106_26;
2777 break;
2778 case HTT_PPDU_STATS_RU_242:
2779 width = RU_242;
2780 break;
2781 case HTT_PPDU_STATS_RU_484:
2782 width = RU_484;
2783 break;
2784 case HTT_PPDU_STATS_RU_484_242:
2785 width = RU_484_242;
2786 break;
2787 case HTT_PPDU_STATS_RU_996:
2788 width = RU_996;
2789 break;
2790 case HTT_PPDU_STATS_RU_996_484:
2791 width = RU_996_484;
2792 break;
2793 case HTT_PPDU_STATS_RU_996_484_242:
2794 width = RU_996_484_242;
2795 break;
2796 case HTT_PPDU_STATS_RU_996x2:
2797 width = RU_2X996;
2798 break;
2799 case HTT_PPDU_STATS_RU_996x2_484:
2800 width = RU_2X996_484;
2801 break;
2802 case HTT_PPDU_STATS_RU_996x3:
2803 width = RU_3X996;
2804 break;
2805 case HTT_PPDU_STATS_RU_996x3_484:
2806 width = RU_3X996_484;
2807 break;
2808 case HTT_PPDU_STATS_RU_996x4:
2809 width = RU_4X996;
2810 break;
2811 default:
2812 dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
2813 }
2814
2815 return width;
2816 }
2817 #else
2818 static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
2819 {
2820 enum cdp_ru_index ru_index;
2821
2822 switch (ru_tones) {
2823 case RU_26:
2824 ru_index = RU_26_INDEX;
2825 break;
2826 case RU_52:
2827 ru_index = RU_52_INDEX;
2828 break;
2829 case RU_106:
2830 ru_index = RU_106_INDEX;
2831 break;
2832 case RU_242:
2833 ru_index = RU_242_INDEX;
2834 break;
2835 case RU_484:
2836 ru_index = RU_484_INDEX;
2837 break;
2838 case RU_996:
2839 ru_index = RU_996_INDEX;
2840 break;
2841 default:
2842 ru_index = RU_INDEX_MAX;
2843 break;
2844 }
2845
2846 return ru_index;
2847 }
2848
2849 static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
2850 {
2851 uint32_t width = 0;
2852
2853 switch (ru_size) {
2854 case HTT_PPDU_STATS_RU_26:
2855 width = RU_26;
2856 break;
2857 case HTT_PPDU_STATS_RU_52:
2858 width = RU_52;
2859 break;
2860 case HTT_PPDU_STATS_RU_106:
2861 width = RU_106;
2862 break;
2863 case HTT_PPDU_STATS_RU_242:
2864 width = RU_242;
2865 break;
2866 case HTT_PPDU_STATS_RU_484:
2867 width = RU_484;
2868 break;
2869 case HTT_PPDU_STATS_RU_996:
2870 width = RU_996;
2871 break;
2872 default:
2873 dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
2874 }
2875
2876 return width;
2877 }
2878 #endif
2879
2880 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
2881 /**
2882 * dp_pdev_telemetry_stats_update() - Update pdev telemetry stats
2883 * @pdev: Datapath pdev handle
2884 * @ppdu: PPDU Descriptor
2885 *
2886 * Return: None
2887 */
2888 static void
2889 dp_pdev_telemetry_stats_update(
2890 struct dp_pdev *pdev,
2891 struct cdp_tx_completion_ppdu_user *ppdu)
2892 {
2893 uint16_t mpdu_tried;
2894 uint16_t mpdu_failed;
2895 uint16_t num_mpdu;
2896 uint8_t ac = 0;
2897
2898 num_mpdu = ppdu->mpdu_success;
2899 mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
2900 mpdu_failed = mpdu_tried - num_mpdu;
2901
2902 ac = TID_TO_WME_AC(ppdu->tid);
2903
2904 DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_failed[ac],
2905 mpdu_failed);
2906
2907 DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_total[ac],
2908 mpdu_tried);
2909 }
2910
2911 /**
2912 * dp_ppdu_desc_get_txmode() - Get TX mode
2913 * @ppdu: PPDU Descriptor
2914 *
2915 * Return: None
2916 */
2917 static inline
2918 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu)
2919 {
2920 uint16_t frame_type = ppdu->htt_frame_type;
2921
2922 ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN;
2923
2924 if (ppdu->frame_type == CDP_PPDU_FTYPE_CTRL &&
2925 (frame_type != HTT_STATS_FTYPE_SGEN_MU_TRIG &&
2926 frame_type != HTT_STATS_FTYPE_SGEN_BE_MU_TRIG))
2927 return;
2928
2929 if (frame_type == HTT_STATS_FTYPE_SGEN_MU_BAR ||
2930 frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_BAR) {
2931 ppdu->txmode = TX_MODE_UL_OFDMA_MU_BAR_TRIGGER;
2932 ppdu->txmode_type = TX_MODE_TYPE_UL;
2933
2934 return;
2935 }
2936
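	/* Map the HTT sequence type, together with the frame type, onto a
	 * DL or UL data tx mode; anything unrecognized stays UNKNOWN.
	 */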
2937 switch (ppdu->htt_seq_type) {
2938 case HTT_SEQTYPE_SU:
2939 if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
2940 ppdu->txmode = TX_MODE_DL_SU_DATA;
2941 ppdu->txmode_type = TX_MODE_TYPE_DL;
2942 }
2943 break;
2944 case HTT_SEQTYPE_MU_OFDMA:
2945 case HTT_SEQTYPE_BE_MU_OFDMA:
2946 if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) {
2947 ppdu->txmode = TX_MODE_DL_OFDMA_DATA;
2948 ppdu->txmode_type = TX_MODE_TYPE_DL;
2949 }
2950 break;
2951 case HTT_SEQTYPE_AC_MU_MIMO:
2952 case HTT_SEQTYPE_AX_MU_MIMO:
2953 case HTT_SEQTYPE_BE_MU_MIMO:
2954 if (frame_type == HTT_STATS_FTYPE_TIDQ_DATA_MU) {
2955 ppdu->txmode = TX_MODE_DL_MUMIMO_DATA;
2956 ppdu->txmode_type = TX_MODE_TYPE_DL;
2957 }
2958 break;
2959 case HTT_SEQTYPE_UL_MU_OFDMA_TRIG:
2960 case HTT_SEQTYPE_BE_UL_MU_OFDMA_TRIG:
2961 if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG ||
2962 frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) {
2963 ppdu->txmode = TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA;
2964 ppdu->txmode_type = TX_MODE_TYPE_UL;
2965 }
2966 break;
2967 case HTT_SEQTYPE_UL_MU_MIMO_TRIG:
2968 case HTT_SEQTYPE_BE_UL_MU_MIMO_TRIG:
2969 if (frame_type == HTT_STATS_FTYPE_SGEN_MU_TRIG ||
2970 frame_type == HTT_STATS_FTYPE_SGEN_BE_MU_TRIG) {
2971 ppdu->txmode = TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA;
2972 ppdu->txmode_type = TX_MODE_TYPE_UL;
2973 }
2974 break;
2975 default:
2976 ppdu->txmode_type = TX_MODE_TYPE_UNKNOWN;
2977 break;
2978 }
2979 }
2980
2981 /**
2982 * dp_pdev_update_deter_stats() - Update pdev deterministic stats
2983 * @pdev: Datapath pdev handle
2984 * @ppdu: PPDU Descriptor
2985 *
2986 * Return: None
2987 */
2988 static inline void
2989 dp_pdev_update_deter_stats(struct dp_pdev *pdev,
2990 struct cdp_tx_completion_ppdu *ppdu)
2991 {
2992 uint32_t user_idx;
2993
2994 if (!pdev || !ppdu)
2995 return;
2996
2997 if (ppdu->txmode_type == TX_MODE_TYPE_UNKNOWN)
2998 return;
2999
3000 if (ppdu->backoff_ac_valid) {
3001 if (ppdu->backoff_ac >= WME_AC_MAX) {
3002 dp_mon_err("backoff_ac %d exceed max limit",
3003 ppdu->backoff_ac);
3004 return;
3005 }
3006 DP_STATS_UPD(pdev,
3007 deter_stats.ch_access_delay[ppdu->backoff_ac],
3008 ppdu->ch_access_delay);
3009 }
3010
3011 if (ppdu->txmode_type == TX_MODE_TYPE_DL) {
3012 DP_STATS_INC(pdev,
3013 deter_stats.dl_mode_cnt[ppdu->txmode],
3014 1);
3015 if (!ppdu->num_users) {
3016 dp_mon_err("dl users is %d", ppdu->num_users);
3017 return;
3018 }
3019 user_idx = ppdu->num_users - 1;
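		/* user_idx buckets the PPDU by its user count, so
		 * dl_ofdma_usr[n] / dl_mimo_usr[n] count PPDUs that
		 * carried n + 1 users.
		 */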
3020 switch (ppdu->txmode) {
3021 case TX_MODE_DL_OFDMA_DATA:
3022 DP_STATS_INC(pdev,
3023 deter_stats.dl_ofdma_usr[user_idx],
3024 1);
3025 break;
3026 case TX_MODE_DL_MUMIMO_DATA:
3027 if (user_idx >= CDP_MU_MAX_MIMO_USERS) {
3028 dp_mon_err("dl mimo users %d exceed max limit",
3029 ppdu->num_users);
3030 return;
3031 }
3032 DP_STATS_INC(pdev,
3033 deter_stats.dl_mimo_usr[user_idx],
3034 1);
3035 break;
3036 }
3037 } else {
3038 DP_STATS_INC(pdev,
3039 deter_stats.ul_mode_cnt[ppdu->txmode],
3040 1);
3041
3042 if (!ppdu->num_ul_users) {
3043 dp_mon_err("dl users is %d", ppdu->num_ul_users);
3044 return;
3045 }
3046 user_idx = ppdu->num_ul_users - 1;
3047 switch (ppdu->txmode) {
3048 case TX_MODE_UL_OFDMA_BASIC_TRIGGER_DATA:
3049 DP_STATS_INC(pdev,
3050 deter_stats.ul_ofdma_usr[user_idx],
3051 1);
3052 break;
3053 case TX_MODE_UL_MUMIMO_BASIC_TRIGGER_DATA:
3054 if (user_idx >= CDP_MU_MAX_MIMO_USERS) {
3055 dp_mon_err("ul mimo users %d exceed max limit",
3056 ppdu->num_ul_users);
3057 return;
3058 }
3059 DP_STATS_INC(pdev,
3060 deter_stats.ul_mimo_usr[user_idx],
3061 1);
3062 break;
3063 }
3064 if (ppdu->num_ul_user_resp_valid) {
3065 if (ppdu->num_ul_user_resp) {
3066 DP_STATS_INC(pdev,
3067 deter_stats.ts[ppdu->txmode].trigger_success,
3068 1);
3069 } else {
3070 DP_STATS_INC(pdev,
3071 deter_stats.ts[ppdu->txmode].trigger_fail,
3072 1);
3073 }
3074 }
3075 }
3076 }
3077
3078 /**
3079 * dp_ppdu_desc_get_msduq() - Get msduq index from bitmap
3080 * @msduq_bitmap: MSDU queue bitmap
3081 * @msduq_index: MSDUQ index
3082 *
3083 * Return: None
3084 */
3085 static inline void
3086 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index)
3087 {
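	/* Check the bitmap in descending priority: the UDP/non-UDP bits map
	 * to the default queue, then custom priorities 0/1, then the
	 * extended priority queues; anything else is MSDUQ_INDEX_MAX
	 * (invalid).
	 */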
3088 if ((msduq_bitmap & BIT(HTT_MSDUQ_INDEX_NON_UDP)) ||
3089 (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_UDP))) {
3090 *msduq_index = MSDUQ_INDEX_DEFAULT;
3091 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_0)) {
3092 *msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_0;
3093 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_PRIO_1)) {
3094 *msduq_index = MSDUQ_INDEX_CUSTOM_PRIO_1;
3095 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_0)) {
3096 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_0;
3097 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_1)) {
3098 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_1;
3099 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_2)) {
3100 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_2;
3101 } else if (msduq_bitmap & BIT(HTT_MSDUQ_INDEX_CUSTOM_EXT_PRIO_3)) {
3102 *msduq_index = MSDUQ_INDEX_CUSTOM_EXT_PRIO_3;
3103 } else {
3104 *msduq_index = MSDUQ_INDEX_MAX;
3105 }
3106 }
3107
3108 /**
3109 * dp_ppdu_desc_user_deter_stats_update() - Update per-peer deterministic stats
3110 * @pdev: Datapath pdev handle
3111 * @peer: Datapath peer handle
3112 * @ppdu_desc: PPDU Descriptor
3113 * @user: PPDU Descriptor per user
3114 *
3115 * Return: None
3116 */
3117 static void
3118 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev,
3119 struct dp_peer *peer,
3120 struct cdp_tx_completion_ppdu *ppdu_desc,
3121 struct cdp_tx_completion_ppdu_user *user)
3122 {
3123 struct dp_mon_peer *mon_peer = NULL;
3124 uint64_t avg_tx_rate = 0;
3125 uint32_t ratekbps = 0;
3126 uint32_t rix;
3127 uint32_t msduq;
3128 uint16_t ratecode = 0;
3129 uint8_t txmode;
3130 uint8_t tid;
3131
3132 if (!pdev || !ppdu_desc || !user || !peer)
3133 return;
3134
3135 mon_peer = peer->monitor_peer;
3136 if (qdf_unlikely(!mon_peer))
3137 return;
3138
3139 if (ppdu_desc->txmode_type == TX_MODE_TYPE_UNKNOWN)
3140 return;
3141
3142 if (ppdu_desc->txmode_type == TX_MODE_TYPE_UL &&
3143 (ppdu_desc->txmode != TX_MODE_UL_OFDMA_MU_BAR_TRIGGER)) {
3144 if (user->tid < CDP_UL_TRIG_BK_TID ||
3145 user->tid > CDP_UL_TRIG_VO_TID)
3146 return;
3147
3148 user->tid = UL_TRIGGER_TID_TO_DATA_TID(user->tid);
3149 }
3150
3151 if (user->tid >= CDP_DATA_TID_MAX)
3152 return;
3153
3154 ratekbps = dp_getrateindex(user->gi,
3155 user->mcs,
3156 user->nss,
3157 user->preamble,
3158 user->bw,
3159 user->punc_mode,
3160 &rix,
3161 &ratecode);
3162
3163 if (!ratekbps)
3164 return;
3165
3166 avg_tx_rate = mon_peer->stats.deter_stats.avg_tx_rate;
3167 avg_tx_rate = dp_ath_rate_lpf(avg_tx_rate,
3168 ratekbps);
3169 DP_STATS_UPD(mon_peer,
3170 deter_stats.avg_tx_rate,
3171 avg_tx_rate);
3172
3173 txmode = ppdu_desc->txmode;
3174 tid = user->tid;
3175
3176 if (ppdu_desc->txmode_type == TX_MODE_TYPE_DL) {
3177 dp_ppdu_desc_get_msduq(user->msduq_bitmap, &msduq);
3178 if (msduq == MSDUQ_INDEX_MAX)
3179 return;
3180
3181 DP_STATS_INC(mon_peer,
3182 deter_stats.deter[tid].dl_det[msduq][txmode].mode_cnt,
3183 1);
3184
3185 DP_STATS_UPD(mon_peer,
3186 deter_stats.deter[tid].dl_det[msduq][txmode].avg_rate,
3187 avg_tx_rate);
3188 } else {
3189 DP_STATS_INC(mon_peer,
3190 deter_stats.deter[tid].ul_det[txmode].mode_cnt,
3191 1);
3192
3193 DP_STATS_UPD(mon_peer,
3194 deter_stats.deter[tid].ul_det[txmode].avg_rate,
3195 avg_tx_rate);
3196 if (!user->completion_status) {
3197 DP_STATS_INC(mon_peer,
3198 deter_stats.deter[tid].ul_det[txmode].trigger_success,
3199 1);
3200 } else {
3201 DP_STATS_INC(mon_peer,
3202 deter_stats.deter[tid].ul_det[txmode].trigger_fail,
3203 1);
3204 }
3205 }
3206 }
3207 #else
3208 static inline
3209 void dp_ppdu_desc_get_txmode(struct cdp_tx_completion_ppdu *ppdu)
3210 {
3211 }
3212
3213 static inline void
3214 dp_ppdu_desc_get_msduq(uint32_t msduq_bitmap, uint32_t *msduq_index)
3215 {
3216 }
3217
3218 static void
3219 dp_ppdu_desc_user_deter_stats_update(struct dp_pdev *pdev,
3220 struct dp_peer *peer,
3221 struct cdp_tx_completion_ppdu *ppdu_desc,
3222 struct cdp_tx_completion_ppdu_user *user)
3223 {
3224 }
3225
3226 static inline void
3227 dp_pdev_telemetry_stats_update(
3228 struct dp_pdev *pdev,
3229 struct cdp_tx_completion_ppdu_user *ppdu)
3230 { }
3231
3232 static inline void
3233 dp_pdev_update_deter_stats(struct dp_pdev *pdev,
3234 struct cdp_tx_completion_ppdu *ppdu)
3235 { }
3236 #endif
3237
3238 /**
3239 * dp_tx_stats_update() - Update per-peer statistics
3240 * @pdev: Datapath pdev handle
3241 * @peer: Datapath peer handle
3242 * @ppdu: PPDU Descriptor per user
3243 * @ppdu_desc: PPDU Descriptor
3244 *
3245 * Return: None
3246 */
3247 static void
3248 dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
3249 struct cdp_tx_completion_ppdu_user *ppdu,
3250 struct cdp_tx_completion_ppdu *ppdu_desc)
3251 {
3252 uint8_t preamble, mcs, res_mcs = 0;
3253 uint16_t num_msdu;
3254 uint16_t num_mpdu;
3255 uint16_t mpdu_tried;
3256 uint16_t mpdu_failed;
3257 struct dp_mon_ops *mon_ops;
3258 enum cdp_ru_index ru_index;
3259 struct dp_mon_peer *mon_peer = NULL;
3260 uint32_t ratekbps = 0;
3261 uint64_t tx_byte_count;
3262 uint8_t idx = 0;
3263 bool is_preamble_valid = true;
3264
3265 preamble = ppdu->preamble;
3266 mcs = ppdu->mcs;
3267 num_msdu = ppdu->num_msdu;
3268 num_mpdu = ppdu->mpdu_success;
3269 mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
3270 mpdu_failed = mpdu_tried - num_mpdu;
3271 tx_byte_count = ppdu->success_bytes;
3272
3273 /* If the peer statistics are already processed as part of
3274 * per-MSDU completion handler, do not process these again in per-PPDU
3275 * indications
3276 */
3277 if (pdev->soc->process_tx_status)
3278 return;
3279
3280 mon_peer = peer->monitor_peer;
3281 if (!mon_peer)
3282 return;
3283
3284 if (!ppdu->is_mcast) {
3285 DP_STATS_INC(mon_peer, tx.tx_ucast_total.num, num_msdu);
3286 DP_STATS_INC(mon_peer, tx.tx_ucast_total.bytes,
3287 tx_byte_count);
3288 }
3289
3290 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
3291 /*
3292 * All failed MPDUs will be retried, so increment the
3293 * retry count by the number of failed MPDUs. Even for
3294 * an ack failure, i.e. for long retries,
3295 * mpdu_failed equals mpdu_tried.
3296 */
3297 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
3298 dp_pdev_telemetry_stats_update(pdev, ppdu);
3299 return;
3300 }
3301
3302 if (ppdu->is_ppdu_cookie_valid)
3303 DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1);
3304
3305 if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
3306 ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
3307 if (qdf_unlikely(ppdu->mu_group_id &&
3308 !(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
3309 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3310 "mu_group_id out of bound!!\n");
3311 else
3312 DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id],
3313 (ppdu->user_pos + 1));
3314 }
3315
3316 if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
3317 ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
3318 DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones);
3319 DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start);
3320 ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones);
3321 if (ru_index != RU_INDEX_MAX) {
3322 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu,
3323 num_msdu);
3324 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu,
3325 num_mpdu);
3326 DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried,
3327 mpdu_tried);
3328 }
3329 }
3330
3331 /*
3332 * All failed MPDUs will be retried, so increment the
3333 * retry count by the number of failed MPDUs. Even for
3334 * an ack failure, i.e. for long retries,
3335 * mpdu_failed equals mpdu_tried.
3336 */
3337 DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
3338
3339 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
3340 num_msdu);
3341 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
3342 num_mpdu);
3343 DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
3344 mpdu_tried);
3345
3346 DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu);
3347 DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu);
3348 DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu);
3349 if (ppdu->tid < CDP_DATA_TID_MAX) {
3350 DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
3351 num_msdu);
3352 DP_STATS_INC(mon_peer,
3353 tx.wme_ac_type_bytes[TID_TO_WME_AC(ppdu->tid)],
3354 tx_byte_count);
3355 }
3356
3357 DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc);
3358 DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc);
3359 if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
3360 DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ppdu_desc->ack_rssi);
3361
3362 if (!ppdu->is_mcast) {
3363 DP_STATS_INC(mon_peer, tx.tx_ucast_success.num, num_msdu);
3364 DP_STATS_INC(mon_peer, tx.tx_ucast_success.bytes,
3365 tx_byte_count);
3366 }
3367
3368 switch (preamble) {
3369 case DOT11_A:
3370 res_mcs = (mcs < MAX_MCS_11A) ? mcs : (MAX_MCS - 1);
3371 break;
3372 case DOT11_B:
3373 res_mcs = (mcs < MAX_MCS_11B) ? mcs : (MAX_MCS - 1);
3374 break;
3375 case DOT11_N:
3376 res_mcs = (mcs < MAX_MCS_11N) ? mcs : (MAX_MCS - 1);
3377 break;
3378 case DOT11_AC:
3379 res_mcs = (mcs < MAX_MCS_11AC) ? mcs : (MAX_MCS - 1);
3380 break;
3381 case DOT11_AX:
3382 res_mcs = (mcs < MAX_MCS_11AX) ? mcs : (MAX_MCS - 1);
3383 break;
3384 default:
3385 is_preamble_valid = false;
3386 }
3387
3388 DP_STATS_INCC(mon_peer,
3389 tx.pkt_type[preamble].mcs_count[res_mcs], num_msdu,
3390 is_preamble_valid);
3391 DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu);
3392 DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu));
3393 DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
3394 DP_STATS_INC(mon_peer, tx.tx_ppdus, 1);
3395 DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu);
3396 DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried);
3397
3398 for (idx = 0; idx < CDP_RSSI_CHAIN_LEN; idx++)
3399 DP_STATS_UPD(mon_peer, tx.rssi_chain[idx], ppdu->rssi_chain[idx]);
3400
3401 mon_ops = dp_mon_ops_get(pdev->soc);
3402 if (mon_ops && mon_ops->mon_tx_stats_update)
3403 mon_ops->mon_tx_stats_update(mon_peer, ppdu);
3404
3405 if (!ppdu->fixed_rate_used)
3406 dp_tx_rate_stats_update(peer, ppdu);
3407
3408 dp_pdev_telemetry_stats_update(pdev, ppdu);
3409
3410 dp_peer_stats_notify(pdev, peer);
3411
3412 ratekbps = mon_peer->stats.tx.tx_rate;
3413 DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps);
3414
3415 dp_send_stats_event(pdev, peer, ppdu->peer_id);
3416 }
3417
3418 /**
3419 * dp_get_ppdu_info_user_index() - Find and allocate a per-user
3420 * descriptor for a PPDU, if a new peer id arrives in a PPDU
3421 * @pdev: DP pdev handle
3422 * @peer_id: peer unique identifier
3423 * @ppdu_info: per ppdu tlv structure
3424 *
3425 * Return: user index to be populated
3426 */
3427 static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
3428 uint16_t peer_id,
3429 struct ppdu_info *ppdu_info)
3430 {
3431 uint8_t user_index = 0;
3432 struct cdp_tx_completion_ppdu *ppdu_desc;
3433 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3434
3435 ppdu_desc =
3436 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3437
3438 while ((user_index + 1) <= ppdu_info->last_user) {
3439 ppdu_user_desc = &ppdu_desc->user[user_index];
3440 if (ppdu_user_desc->peer_id != peer_id) {
3441 user_index++;
3442 continue;
3443 } else {
3444 /* Max users possible is 8 so user array index should
3445 * not exceed 7
3446 */
3447 qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
3448 return user_index;
3449 }
3450 }
3451
3452 ppdu_info->last_user++;
3453 /* Max users possible is 8 so last user should not exceed 8 */
3454 qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
3455 return ppdu_info->last_user - 1;
3456 }
3457
3458 /**
3459 * dp_process_ppdu_stats_common_tlv() - Process htt_ppdu_stats_common_tlv
3460 * @pdev: DP pdev handle
3461 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
3462 * @ppdu_info: per ppdu tlv structure
3463 *
3464 * Return: void
3465 */
3466 static void
3467 dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
3468 uint32_t *tag_buf,
3469 struct ppdu_info *ppdu_info)
3470 {
3471 uint16_t frame_type;
3472 uint16_t frame_ctrl;
3473 uint16_t freq;
3474 struct dp_soc *soc = NULL;
3475 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3476 uint64_t ppdu_start_timestamp;
3477 uint32_t eval_start_timestamp;
3478 uint32_t *start_tag_buf;
3479 uint32_t *ts_tag_buf;
3480
3481 start_tag_buf = tag_buf;
3482 ppdu_desc =
3483 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3484
3485 ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
3486
3487 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
3488 ppdu_info->sched_cmdid =
3489 HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
3490 ppdu_desc->num_users =
3491 HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
3492
3493 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
3494
3495 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
3496 frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
3497 ppdu_desc->htt_frame_type = frame_type;
3498
3499 ppdu_desc->htt_seq_type =
3500 HTT_PPDU_STATS_COMMON_TLV_PPDU_SEQ_TYPE_GET(*tag_buf);
3501
3502 frame_ctrl = ppdu_desc->frame_ctrl;
3503
3504 ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
3505
3506 switch (frame_type) {
3507 case HTT_STATS_FTYPE_TIDQ_DATA_SU:
3508 case HTT_STATS_FTYPE_TIDQ_DATA_MU:
3509 case HTT_STATS_FTYPE_SGEN_QOS_NULL:
3510 /*
3511 * For management packets the frame type comes as DATA_SU,
3512 * so check frame_ctrl before setting frame_type.
3513 */
3514 if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
3515 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
3516 else
3517 ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
3518 break;
3519 case HTT_STATS_FTYPE_SGEN_MU_BAR:
3520 case HTT_STATS_FTYPE_SGEN_BAR:
3521 case HTT_STATS_FTYPE_SGEN_BE_MU_BAR:
3522 ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
3523 break;
3524 default:
3525 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
3526 break;
3527 }
3528
3529 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
3530 ppdu_desc->tx_duration = *tag_buf;
3531
3532 tag_buf = start_tag_buf +
3533 HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US);
3534 eval_start_timestamp = *tag_buf;
3535
3536 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
3537 ppdu_desc->ppdu_start_timestamp = *tag_buf;
3538
3539 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
3540 freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
3541 if (freq != ppdu_desc->channel) {
3542 soc = pdev->soc;
3543 ppdu_desc->channel = freq;
3544 pdev->operating_channel.freq = freq;
3545 if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
3546 pdev->operating_channel.num =
3547 soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
3548 pdev->pdev_id,
3549 freq);
3550
3551 if (soc && soc->cdp_soc.ol_ops->freq_to_band)
3552 pdev->operating_channel.band =
3553 soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
3554 pdev->pdev_id,
3555 freq);
3556 }
3557
3558 ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
3559
3560 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
3561 ppdu_desc->phy_ppdu_tx_time_us =
3562 HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
3563 ppdu_desc->beam_change =
3564 HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
3565 ppdu_desc->doppler =
3566 HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
3567 ppdu_desc->spatial_reuse =
3568 HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
3569 ppdu_desc->num_ul_users =
3570 HTT_PPDU_STATS_COMMON_TLV_NUM_UL_EXPECTED_USERS_GET(*tag_buf);
3571
3572 dp_tx_capture_htt_frame_counter(pdev, frame_type);
3573
3574 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
3575 ppdu_start_timestamp = *tag_buf;
3576 ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
3577 HTT_SHIFT_UPPER_TIMESTAMP) &
3578 HTT_MASK_UPPER_TIMESTAMP);
3579
3580 	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
3581 					ppdu_desc->tx_duration;
3582 	/* The ack timestamp is the same as the end timestamp */
3583 	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
3584 
3588 	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
3589 	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
3590 	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
3594
3595 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
3596 ppdu_desc->bss_color =
3597 HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
3598
3599 ppdu_desc->backoff_ac_valid =
3600 HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_VALID_GET(*tag_buf);
3601 if (ppdu_desc->backoff_ac_valid) {
3602 ppdu_desc->backoff_ac =
3603 HTT_PPDU_STATS_COMMON_TLV_BACKOFF_AC_GET(*tag_buf);
3604 ts_tag_buf = start_tag_buf +
3605 HTT_GET_STATS_CMN_INDEX(SCH_EVAL_START_TSTMP_L32_US);
3606 eval_start_timestamp = *ts_tag_buf;
3607
3608 ts_tag_buf = start_tag_buf +
3609 HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
3610 ppdu_desc->ch_access_delay =
3611 *ts_tag_buf - eval_start_timestamp;
3612 }
3613 ppdu_desc->num_ul_user_resp_valid =
3614 HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_VALID_GET(*tag_buf);
3615 if (ppdu_desc->num_ul_user_resp_valid)
3616 ppdu_desc->num_ul_user_resp =
3617 HTT_PPDU_STATS_COMMON_TLV_NUM_UL_USER_RESPONSES_GET(*tag_buf);
3618 }
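
/*
 * Illustrative sketch (not part of the driver): how the two 32-bit TLV
 * words are stitched into the 64-bit PPDU start timestamp above. The
 * shift and mask mirror what HTT_SHIFT_UPPER_TIMESTAMP and
 * HTT_MASK_UPPER_TIMESTAMP are assumed to expand to; treat both as
 * assumptions made for this example only.
 */
#include <stdint.h>

static uint64_t example_combine_tstamp(uint32_t tstamp_l32,
				       uint32_t tstamp_u32)
{
	uint64_t ts = tstamp_l32;

	/* place the upper word in bits 63..32 */
	ts |= ((uint64_t)tstamp_u32 << 32) & 0xFFFFFFFF00000000ULL;
	return ts;
}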
3619
3620 /**
3621 * dp_process_ppdu_stats_user_common_tlv() - Process ppdu_stats_user_common
3622 * @pdev: DP PDEV handle
3623 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
3624 * @ppdu_info: per ppdu tlv structure
3625 *
3626 * Return: void
3627 */
3628 static void dp_process_ppdu_stats_user_common_tlv(
3629 struct dp_pdev *pdev, uint32_t *tag_buf,
3630 struct ppdu_info *ppdu_info)
3631 {
3632 uint16_t peer_id;
3633 struct cdp_tx_completion_ppdu *ppdu_desc;
3634 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3635 uint8_t curr_user_index = 0;
3636 struct dp_peer *peer;
3637 struct dp_vdev *vdev;
3638 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3639
3640 ppdu_desc =
3641 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3642
3643 tag_buf++;
3644 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
3645
3646 curr_user_index =
3647 dp_get_ppdu_info_user_index(pdev,
3648 peer_id, ppdu_info);
3649 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3650 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3651
3652 ppdu_desc->vdev_id =
3653 HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
3654
3655 ppdu_user_desc->peer_id = peer_id;
3656
3657 tag_buf++;
3658
3659 if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
3660 ppdu_user_desc->delayed_ba = 1;
3661 ppdu_desc->delayed_ba = 1;
3662 }
3663
3664 if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
3665 ppdu_user_desc->is_mcast = true;
3666 ppdu_user_desc->mpdu_tried_mcast =
3667 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
3668 ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
3669 } else {
3670 ppdu_user_desc->mpdu_tried_ucast =
3671 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
3672 }
3673
3674 ppdu_user_desc->is_seq_num_valid =
3675 HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
3676 tag_buf++;
3677
3678 ppdu_user_desc->qos_ctrl =
3679 HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
3680 ppdu_user_desc->frame_ctrl =
3681 HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
3682 ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
3683
3684 if (ppdu_user_desc->delayed_ba)
3685 ppdu_user_desc->mpdu_success = 0;
3686
3687 tag_buf += 3;
3688
3689 if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
3690 ppdu_user_desc->ppdu_cookie =
3691 HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
3692 ppdu_user_desc->is_ppdu_cookie_valid = 1;
3693 }
3694
3695 	/* returning early would leave the remaining fields unpopulated */
3696 if (peer_id == DP_SCAN_PEER_ID) {
3697 vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
3698 DP_MOD_ID_TX_PPDU_STATS);
3699 if (!vdev)
3700 return;
3701 qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
3702 QDF_MAC_ADDR_SIZE);
3703 dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
3704 } else {
3705 peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
3706 DP_MOD_ID_TX_PPDU_STATS);
3707 if (!peer) {
3708 /*
3709 			 * FW may send a peer_id that is about to be removed,
3710 			 * but the peer has already been removed on the host.
3711 			 * E.g. on disassoc, FW sends PPDU stats with a
3712 			 * peer_id equal to the previously associated peer's
3713 			 * peer_id, even though that peer is gone.
3714 */
3715 vdev = dp_vdev_get_ref_by_id(pdev->soc,
3716 ppdu_desc->vdev_id,
3717 DP_MOD_ID_TX_PPDU_STATS);
3718 if (!vdev)
3719 return;
3720 qdf_mem_copy(ppdu_user_desc->mac_addr,
3721 vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
3722 dp_vdev_unref_delete(pdev->soc, vdev,
3723 DP_MOD_ID_TX_PPDU_STATS);
3724 return;
3725 }
3726 qdf_mem_copy(ppdu_user_desc->mac_addr,
3727 peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
3728 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
3729 }
3730
3731 tag_buf += 10;
3732 ppdu_user_desc->msduq_bitmap = *tag_buf;
3733 }
3734
3735 /**
3736 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
3737 * @pdev: DP pdev handle
3738 * @tag_buf: T2H message buffer carrying the user rate TLV
3739 * @ppdu_info: per ppdu tlv structure
3740 *
3741 * Return: void
3742 */
3743 static void
3744 dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
3745 uint32_t *tag_buf,
3746 struct ppdu_info *ppdu_info)
3747 {
3748 uint16_t peer_id;
3749 struct cdp_tx_completion_ppdu *ppdu_desc;
3750 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3751 uint8_t curr_user_index = 0;
3752 struct dp_vdev *vdev;
3753 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3754 uint8_t bw, ru_format;
3755 uint16_t ru_size;
3756 htt_ppdu_stats_user_rate_tlv *stats_buf =
3757 (htt_ppdu_stats_user_rate_tlv *)tag_buf;
3758
3759 ppdu_desc =
3760 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3761
3762 tag_buf++;
3763 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
3764
3765 curr_user_index =
3766 dp_get_ppdu_info_user_index(pdev,
3767 peer_id, ppdu_info);
3768 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3769 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3770 if (peer_id == DP_SCAN_PEER_ID) {
3771 vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
3772 DP_MOD_ID_TX_PPDU_STATS);
3773 if (!vdev)
3774 return;
3775 dp_vdev_unref_delete(pdev->soc, vdev,
3776 DP_MOD_ID_TX_PPDU_STATS);
3777 }
3778 ppdu_user_desc->peer_id = peer_id;
3779
3780 ppdu_user_desc->tid =
3781 HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
3782
3783 tag_buf += 1;
3784
3785 ppdu_user_desc->user_pos =
3786 HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
3787 ppdu_user_desc->mu_group_id =
3788 HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
3789
3790 ru_format = HTT_PPDU_STATS_USER_RATE_TLV_RU_FORMAT_GET(*tag_buf);
3791
3792 tag_buf += 1;
3793
3794 if (!ru_format) {
3795 /* ru_format = 0: ru_end, ru_start */
3796 ppdu_user_desc->ru_start =
3797 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
3798 ppdu_user_desc->ru_tones =
3799 (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
3800 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
3801 } else if (ru_format == 1) {
3802 /* ru_format = 1: ru_index, ru_size */
3803 ru_size = HTT_PPDU_STATS_USER_RATE_TLV_RU_SIZE_GET(*tag_buf);
3804 ppdu_user_desc->ru_tones =
3805 dp_mon_get_ru_width_from_ru_size(ru_size);
3806 } else {
3807 dp_mon_debug("Unsupported ru_format: %d rcvd", ru_format);
3808 }
3809 ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
3810
3811 tag_buf += 2;
3812
3813 ppdu_user_desc->ppdu_type =
3814 HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
3815
3816 tag_buf++;
3817 ppdu_user_desc->tx_rate = *tag_buf;
3818
3819 ppdu_user_desc->ltf_size =
3820 HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
3821 ppdu_user_desc->stbc =
3822 HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
3823 ppdu_user_desc->he_re =
3824 HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
3825 ppdu_user_desc->txbf =
3826 HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
3827 bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
3828 /* Align bw value as per host data structures */
3829 if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ)
3830 ppdu_user_desc->bw = bw - 3;
3831 else
3832 ppdu_user_desc->bw = bw - 2;
3833 ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
3834 ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
3835 ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
3836 ppdu_user_desc->preamble =
3837 HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
3838 ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
3839 ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
3840 ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
3841
3842 tag_buf += 2;
3843 ppdu_user_desc->punc_pattern_bitmap =
3844 HTT_PPDU_STATS_USER_RATE_TLV_PUNC_PATTERN_BITMAP_GET(*tag_buf);
3845 ppdu_user_desc->fixed_rate_used = stats_buf->is_min_rate;
3846 }
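
/*
 * Illustrative sketch (not part of the driver): the two RU encodings
 * handled above. For ru_format 0 the tone count comes from the
 * inclusive [ru_start, ru_end] range; for ru_format 1 it comes from a
 * size-to-width lookup (dp_mon_get_ru_width_from_ru_size() in the real
 * code; a hypothetical stub here).
 */
#include <stdint.h>

static uint16_t example_ru_width_from_size(uint16_t ru_size)
{
	/* hypothetical stand-in for the driver's lookup table */
	return ru_size;
}

static uint16_t example_ru_tones(uint8_t ru_format, uint16_t ru_start,
				 uint16_t ru_end, uint16_t ru_size)
{
	if (ru_format == 0)
		return (ru_end - ru_start) + 1;	/* inclusive range */
	if (ru_format == 1)
		return example_ru_width_from_size(ru_size);
	return 0;				/* unsupported format */
}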
3847
3848 /**
3849 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv() - Process
3850 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
3851 * @pdev: DP PDEV handle
3852 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
3853 * @ppdu_info: per ppdu tlv structure
3854 *
3855 * Return: void
3856 */
3857 static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
3858 struct dp_pdev *pdev, uint32_t *tag_buf,
3859 struct ppdu_info *ppdu_info)
3860 {
3861 htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
3862 (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
3863
3864 struct cdp_tx_completion_ppdu *ppdu_desc;
3865 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3866 uint8_t curr_user_index = 0;
3867 uint16_t peer_id;
3868 uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
3869 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3870
3871 ppdu_desc =
3872 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3873
3874 tag_buf++;
3875
3876 peer_id =
3877 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
3878
3879 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3880 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3881 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3882 ppdu_user_desc->peer_id = peer_id;
3883
3884 ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
3885 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
3886 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
3887
3888 dp_process_ppdu_stats_update_failed_bitmap(pdev,
3889 (void *)ppdu_user_desc,
3890 ppdu_info->ppdu_id,
3891 size);
3892 }
3893
3894 /**
3895 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv() - Process
3896 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
3897 * @pdev: DP PDEV handle
3898 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
3899 * @ppdu_info: per ppdu tlv structure
3900 *
3901 * Return: void
3902 */
3903 static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
3904 struct dp_pdev *pdev, uint32_t *tag_buf,
3905 struct ppdu_info *ppdu_info)
3906 {
3907 htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
3908 (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
3909
3910 struct cdp_tx_completion_ppdu *ppdu_desc;
3911 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3912 uint8_t curr_user_index = 0;
3913 uint16_t peer_id;
3914 uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
3915 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3916
3917 ppdu_desc =
3918 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3919
3920 tag_buf++;
3921
3922 peer_id =
3923 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
3924
3925 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3926 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3927 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3928 ppdu_user_desc->peer_id = peer_id;
3929
3930 ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
3931 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
3932 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
3933
3934 dp_process_ppdu_stats_update_failed_bitmap(pdev,
3935 (void *)ppdu_user_desc,
3936 ppdu_info->ppdu_id,
3937 size);
3938 }
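
/*
 * Illustrative sketch (not part of the driver): counting enqueued MPDUs
 * from the 64- or 256-entry bitmaps copied by the two handlers above.
 * Each set bit marks one MPDU enqueued relative to start_seq, so a
 * per-dword popcount suffices; 'ndwords' would be
 * CDP_BA_64_BIT_MAP_SIZE_DWORDS or the 256 variant. Assumes a
 * GCC/Clang builtin.
 */
#include <stdint.h>

static uint32_t example_count_enqueued_mpdus(const uint32_t *enq_bitmap,
					     uint32_t ndwords)
{
	uint32_t i, count = 0;

	for (i = 0; i < ndwords; i++)
		count += (uint32_t)__builtin_popcount(enq_bitmap[i]);
	return count;
}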
3939
3940 /**
3941 * dp_process_ppdu_stats_user_cmpltn_common_tlv() - Process
3942 * htt_ppdu_stats_user_cmpltn_common_tlv
3943 * @pdev: DP PDEV handle
3944 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
3945 * @ppdu_info: per ppdu tlv structure
3946 *
3947 * Return: void
3948 */
3949 static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
3950 struct dp_pdev *pdev, uint32_t *tag_buf,
3951 struct ppdu_info *ppdu_info)
3952 {
3953 uint16_t peer_id;
3954 struct cdp_tx_completion_ppdu *ppdu_desc;
3955 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
3956 uint8_t curr_user_index = 0;
3957 uint8_t bw_iter;
3958 htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
3959 (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
3960 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
3961
3962 ppdu_desc =
3963 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
3964
3965 tag_buf++;
3966 peer_id =
3967 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
3968
3969 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
3970 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
3971 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
3972 ppdu_user_desc->peer_id = peer_id;
3973
3974 ppdu_user_desc->completion_status =
3975 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
3976 *tag_buf);
3977
3978 ppdu_user_desc->tid =
3979 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
3980
3981 tag_buf++;
3982 if (qdf_likely(ppdu_user_desc->completion_status ==
3983 HTT_PPDU_STATS_USER_STATUS_OK)) {
3984 ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
3985 ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
3986 ppdu_user_desc->ack_rssi_valid = 1;
3987 } else {
3988 ppdu_user_desc->ack_rssi_valid = 0;
3989 }
3990
3991 tag_buf++;
3992
3993 ppdu_user_desc->mpdu_success =
3994 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
3995
3996 ppdu_user_desc->mpdu_failed =
3997 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
3998 ppdu_user_desc->mpdu_success;
3999
4000 tag_buf++;
4001
4002 ppdu_user_desc->long_retries =
4003 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
4004
4005 ppdu_user_desc->short_retries =
4006 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
4007 ppdu_user_desc->retry_mpdus =
4008 ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
4009
4010 ppdu_user_desc->is_ampdu =
4011 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
4012 ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
4013
4014 ppdu_desc->resp_type =
4015 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
4016 ppdu_desc->mprot_type =
4017 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
4018 ppdu_desc->rts_success =
4019 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
4020 ppdu_desc->rts_failure =
4021 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
4022
4023 ppdu_user_desc->mprot_type = ppdu_desc->mprot_type;
4024 ppdu_user_desc->rts_success = ppdu_desc->rts_success;
4025 ppdu_user_desc->rts_failure = ppdu_desc->rts_failure;
4026
4027 ppdu_user_desc->pream_punct =
4028 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
4029
4030 ppdu_info->compltn_common_tlv++;
4031
4032 /*
4033 	 * A MU BAR may send a request to n users, but we may receive an ack
4034 	 * from only m of them. To count how many users responded, we keep a
4035 	 * separate per-PPDU counter, bar_num_users, which is incremented for
4036 	 * every htt_ppdu_stats_user_cmpltn_common_tlv.
4037 */
4038 ppdu_desc->bar_num_users++;
4039
4040 tag_buf++;
4041 for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
4042 ppdu_user_desc->rssi_chain[bw_iter] =
4043 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
4044 tag_buf++;
4045 }
4046
4047 ppdu_user_desc->sa_tx_antenna =
4048 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
4049
4050 tag_buf++;
4051 ppdu_user_desc->sa_is_training =
4052 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
4053 if (ppdu_user_desc->sa_is_training) {
4054 ppdu_user_desc->sa_goodput =
4055 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
4056 }
4057
4058 tag_buf++;
4059 for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
4060 ppdu_user_desc->sa_max_rates[bw_iter] =
4061 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
4062 }
4063
4064 tag_buf += CDP_NUM_SA_BW;
4065 ppdu_user_desc->current_rate_per =
4066 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
4067
4068 tag_buf++;
4069 /* Skip SW RTS */
4070
4071 tag_buf++;
4072 /* Extract 320MHz MAX PHY ratecode */
4073 ppdu_user_desc->sa_max_rates[CDP_SA_BW320_INX] =
4074 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(*tag_buf);
4075 }
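
/*
 * Worked example (assumed numbers) of the completion accounting above:
 * with mpdu_tried = 10 and mpdu_success = 7, mpdu_failed = 10 - 7 = 3;
 * with long_retries = 2 and short_retries = 1, retry_mpdus = 3. A
 * minimal sketch of the same arithmetic:
 */
#include <stdint.h>

static void example_fill_cmpltn(uint16_t tried, uint16_t success,
				uint16_t long_retries,
				uint16_t short_retries,
				uint16_t *mpdu_failed,
				uint16_t *retry_mpdus)
{
	*mpdu_failed = tried - success;		/* failures = tried - ok */
	*retry_mpdus = long_retries + short_retries; /* total retried MPDUs */
}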
4076
4077 /**
4078 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv() - Process
4079 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
4080 * @pdev: DP PDEV handle
4081 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
4082 * @ppdu_info: per ppdu tlv structure
4083 *
4084 * Return: void
4085 */
4086 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
4087 struct dp_pdev *pdev, uint32_t *tag_buf,
4088 struct ppdu_info *ppdu_info)
4089 {
4090 htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
4091 (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
4092 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4093 struct cdp_tx_completion_ppdu *ppdu_desc;
4094 uint8_t curr_user_index = 0;
4095 uint16_t peer_id;
4096 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4097
4098 ppdu_desc =
4099 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4100
4101 tag_buf++;
4102
4103 peer_id =
4104 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
4105
4106 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4107 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4108 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4109 ppdu_user_desc->peer_id = peer_id;
4110
4111 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
4112 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
4113 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
4114 ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
4115 }
4116
4117 /**
4118 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv() - Process
4119 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
4120 * @pdev: DP PDEV handle
4121 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
4122 * @ppdu_info: per ppdu tlv structure
4123 *
4124 * Return: void
4125 */
4126 static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
4127 struct dp_pdev *pdev, uint32_t *tag_buf,
4128 struct ppdu_info *ppdu_info)
4129 {
4130 htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
4131 (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
4132 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4133 struct cdp_tx_completion_ppdu *ppdu_desc;
4134 uint8_t curr_user_index = 0;
4135 uint16_t peer_id;
4136 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4137
4138 ppdu_desc =
4139 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4140
4141 tag_buf++;
4142
4143 peer_id =
4144 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
4145
4146 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4147 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4148 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4149 ppdu_user_desc->peer_id = peer_id;
4150
4151 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
4152 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
4153 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
4154 ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
4155 }
4156
4157 /**
4158 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv() - Process
4159 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
4160 * @pdev: DP PDEV handle
4161 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
4162 * @ppdu_info: per ppdu tlv structure
4163 *
4164 * Return: void
4165 */
4166 static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
4167 struct dp_pdev *pdev, uint32_t *tag_buf,
4168 struct ppdu_info *ppdu_info)
4169 {
4170 uint16_t peer_id;
4171 struct cdp_tx_completion_ppdu *ppdu_desc;
4172 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4173 uint8_t curr_user_index = 0;
4174 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4175
4176 ppdu_desc =
4177 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4178
4179 tag_buf += 2;
4180 peer_id =
4181 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
4182
4183 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4184 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4185 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4186 if (!ppdu_user_desc->ack_ba_tlv) {
4187 ppdu_user_desc->ack_ba_tlv = 1;
4188 } else {
4189 pdev->stats.ack_ba_comes_twice++;
4190 return;
4191 }
4192
4193 ppdu_user_desc->peer_id = peer_id;
4194
4195 tag_buf++;
4196 	/* do not update ppdu_desc->tid from this TLV */
4197 ppdu_user_desc->num_mpdu =
4198 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
4199
4200 ppdu_user_desc->num_msdu =
4201 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
4202
4203 ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
4204
4205 tag_buf++;
4206 ppdu_user_desc->start_seq =
4207 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
4208 *tag_buf);
4209
4210 tag_buf++;
4211 ppdu_user_desc->success_bytes = *tag_buf;
4212
4213 /* increase ack ba tlv counter on successful mpdu */
4214 if (ppdu_user_desc->num_mpdu)
4215 ppdu_info->ack_ba_tlv++;
4216
4217 if (ppdu_user_desc->ba_size == 0) {
4218 ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
4219 ppdu_user_desc->ba_bitmap[0] = 1;
4220 ppdu_user_desc->ba_size = 1;
4221 }
4222 }
4223
4224 /**
4225 * dp_process_ppdu_stats_user_common_array_tlv() - Process
4226 * htt_ppdu_stats_user_common_array_tlv
4227 * @pdev: DP PDEV handle
4228 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv
4229 * @ppdu_info: per ppdu tlv structure
4230 *
4231 * Return: void
4232 */
4233 static void dp_process_ppdu_stats_user_common_array_tlv(
4234 struct dp_pdev *pdev, uint32_t *tag_buf,
4235 struct ppdu_info *ppdu_info)
4236 {
4237 uint32_t peer_id;
4238 struct cdp_tx_completion_ppdu *ppdu_desc;
4239 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
4240 uint8_t curr_user_index = 0;
4241 struct htt_tx_ppdu_stats_info *dp_stats_buf;
4242 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4243
4244 ppdu_desc =
4245 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
4246
4247 tag_buf++;
4248 dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
4249 tag_buf += 3;
4250 peer_id =
4251 HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
4252
4253 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
4254 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4255 "Peer with peer_id: %u not found", peer_id);
4256 return;
4257 }
4258
4259 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
4260
4261 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
4262 ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
4263
4264 ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
4265 ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
4266
4267 tag_buf++;
4268
4269 ppdu_user_desc->success_msdus =
4270 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
4271 ppdu_user_desc->retry_msdus =
4272 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
4273 tag_buf++;
4274 ppdu_user_desc->failed_msdus =
4275 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
4276 }
4277
4278 /**
4279 * dp_process_ppdu_stats_user_compltn_flush_tlv() - Process
4280 * htt_ppdu_stats_flush_tlv
4281 * @pdev: DP PDEV handle
4282 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
4283 * @ppdu_info: per ppdu tlv structure
4284 *
4285 * Return: void
4286 */
4287 static void
4288 dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
4289 uint32_t *tag_buf,
4290 struct ppdu_info *ppdu_info)
4291 {
4292 struct cdp_tx_completion_ppdu *ppdu_desc;
4293 uint32_t peer_id;
4294 uint8_t tid;
4295 struct dp_peer *peer;
4296 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4297 struct dp_mon_peer *mon_peer = NULL;
4298
4299 ppdu_desc = (struct cdp_tx_completion_ppdu *)
4300 qdf_nbuf_data(ppdu_info->nbuf);
4301 ppdu_desc->is_flush = 1;
4302
4303 tag_buf++;
4304 ppdu_desc->drop_reason = *tag_buf;
4305
4306 tag_buf++;
4307 ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
4308 ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
4309 ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
4310
4311 tag_buf++;
4312 peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
4313 tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
4314
4315 ppdu_desc->num_users = 1;
4316 ppdu_desc->user[0].peer_id = peer_id;
4317 ppdu_desc->user[0].tid = tid;
4318
4319 ppdu_desc->queue_type =
4320 HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
4321
4322 peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
4323 DP_MOD_ID_TX_PPDU_STATS);
4324 if (!peer)
4325 goto add_ppdu_to_sched_list;
4326
4327 if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
4328 mon_peer = peer->monitor_peer;
4329 DP_STATS_INC(mon_peer,
4330 tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
4331 ppdu_desc->num_msdu);
4332 }
4333
4334 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4335
4336 add_ppdu_to_sched_list:
4337 ppdu_info->done = 1;
4338 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
4339 mon_pdev->list_depth--;
4340 TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
4341 ppdu_info_list_elem);
4342 mon_pdev->sched_comp_list_depth++;
4343 }
4344
4345 /**
4346 * dp_process_ppdu_stats_sch_cmd_status_tlv() - Process the schedule command status TLV
4347 * Here the TLV buffer itself is not processed.
4348 * @pdev: DP PDEV handle
4349 * @ppdu_info: per ppdu tlv structure
4350 *
4351 * Return: void
4352 */
4353 static void
4354 dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
4355 struct ppdu_info *ppdu_info)
4356 {
4357 struct cdp_tx_completion_ppdu *ppdu_desc;
4358 struct dp_peer *peer;
4359 uint8_t num_users;
4360 uint8_t i;
4361 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4362
4363 ppdu_desc = (struct cdp_tx_completion_ppdu *)
4364 qdf_nbuf_data(ppdu_info->nbuf);
4365
4366 num_users = ppdu_desc->bar_num_users;
4367
4368 for (i = 0; i < num_users; i++) {
4369 if (ppdu_desc->user[i].user_pos == 0) {
4370 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4371 /* update phy mode for bar frame */
4372 ppdu_desc->phy_mode =
4373 ppdu_desc->user[i].preamble;
4374 ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
4375 break;
4376 }
4377 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
4378 ppdu_desc->frame_ctrl =
4379 ppdu_desc->user[i].frame_ctrl;
4380 break;
4381 }
4382 }
4383 }
4384
4385 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
4386 ppdu_desc->delayed_ba) {
4387 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
4388
4389 for (i = 0; i < ppdu_desc->num_users; i++) {
4390 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
4391 uint64_t start_tsf;
4392 uint64_t end_tsf;
4393 uint32_t ppdu_id;
4394 struct dp_mon_peer *mon_peer;
4395
4396 ppdu_id = ppdu_desc->ppdu_id;
4397 peer = dp_peer_get_ref_by_id
4398 (pdev->soc, ppdu_desc->user[i].peer_id,
4399 DP_MOD_ID_TX_PPDU_STATS);
4400 /*
4401 * This check is to make sure peer is not deleted
4402 * after processing the TLVs.
4403 */
4404 if (!peer)
4405 continue;
4406
4407 if (!peer->monitor_peer) {
4408 dp_peer_unref_delete(peer,
4409 DP_MOD_ID_TX_PPDU_STATS);
4410 continue;
4411 }
4412
4413 mon_peer = peer->monitor_peer;
4414 delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
4415 start_tsf = ppdu_desc->ppdu_start_timestamp;
4416 end_tsf = ppdu_desc->ppdu_end_timestamp;
4417 /*
4418 * save delayed ba user info
4419 */
4420 if (ppdu_desc->user[i].delayed_ba) {
4421 dp_peer_copy_delay_stats(peer,
4422 &ppdu_desc->user[i],
4423 ppdu_id);
4424 mon_peer->last_delayed_ba_ppduid = ppdu_id;
4425 delay_ppdu->ppdu_start_timestamp = start_tsf;
4426 delay_ppdu->ppdu_end_timestamp = end_tsf;
4427 }
4428 ppdu_desc->user[i].peer_last_delayed_ba =
4429 mon_peer->last_delayed_ba;
4430
4431 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4432
4433 if (ppdu_desc->user[i].delayed_ba &&
4434 !ppdu_desc->user[i].debug_copied) {
4435 QDF_TRACE(QDF_MODULE_ID_TXRX,
4436 QDF_TRACE_LEVEL_INFO_MED,
4437 "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
4438 __func__, __LINE__,
4439 ppdu_desc->ppdu_id,
4440 ppdu_desc->bar_ppdu_id,
4441 ppdu_desc->num_users,
4442 i,
4443 ppdu_desc->htt_frame_type);
4444 }
4445 }
4446 }
4447
4448 /*
4449 	 * When the frame type is BAR and STATS_COMMON_TLV is set,
4450 	 * copy the stored peer delayed-BA info to the BAR status.
4451 */
4452 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4453 for (i = 0; i < ppdu_desc->bar_num_users; i++) {
4454 struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
4455 uint64_t start_tsf;
4456 uint64_t end_tsf;
4457 struct dp_mon_peer *mon_peer;
4458
4459 peer = dp_peer_get_ref_by_id
4460 (pdev->soc,
4461 ppdu_desc->user[i].peer_id,
4462 DP_MOD_ID_TX_PPDU_STATS);
4463 /*
4464 * This check is to make sure peer is not deleted
4465 * after processing the TLVs.
4466 */
4467 if (!peer)
4468 continue;
4469
4470 if (!peer->monitor_peer) {
4471 dp_peer_unref_delete(peer,
4472 DP_MOD_ID_TX_PPDU_STATS);
4473 continue;
4474 }
4475
4476 mon_peer = peer->monitor_peer;
4477 if (ppdu_desc->user[i].completion_status !=
4478 HTT_PPDU_STATS_USER_STATUS_OK) {
4479 dp_peer_unref_delete(peer,
4480 DP_MOD_ID_TX_PPDU_STATS);
4481 continue;
4482 }
4483
4484 delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
4485 start_tsf = delay_ppdu->ppdu_start_timestamp;
4486 end_tsf = delay_ppdu->ppdu_end_timestamp;
4487
4488 if (mon_peer->last_delayed_ba) {
4489 dp_peer_copy_stats_to_bar(peer,
4490 &ppdu_desc->user[i]);
4491 ppdu_desc->ppdu_id =
4492 mon_peer->last_delayed_ba_ppduid;
4493 ppdu_desc->ppdu_start_timestamp = start_tsf;
4494 ppdu_desc->ppdu_end_timestamp = end_tsf;
4495 }
4496 ppdu_desc->user[i].peer_last_delayed_ba =
4497 mon_peer->last_delayed_ba;
4498 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4499 }
4500 }
4501
4502 TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
4503 mon_pdev->list_depth--;
4504 TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
4505 ppdu_info_list_elem);
4506 mon_pdev->sched_comp_list_depth++;
4507 }
4508
4509 /**
4510 * dp_validate_fix_ppdu_tlv() - Function to validate and fix the length of a PPDU TLV
4511 * @pdev: DP pdev handle
4512 * @tag_buf: TLV buffer
4513 * @tlv_expected_size: Expected size of Tag
4514 * @tlv_len: TLV length received from FW
4515 *
4516 * If the TLV length sent as part of the PPDU TLV is less than the expected
4517 * size, i.e. the size of the corresponding data structure, pad the remaining
4518 * bytes with zeros and continue processing the TLVs.
4519 *
4520 * Return: Pointer to updated TLV
4521 */
4522 static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
4523 uint32_t *tag_buf,
4524 uint16_t tlv_expected_size,
4525 uint16_t tlv_len)
4526 {
4527 uint32_t *tlv_desc = tag_buf;
4528 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4529
4530 qdf_assert_always(tlv_len != 0);
4531
4532 if (tlv_len < tlv_expected_size) {
4533 qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
4534 qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
4535 tlv_desc = mon_pdev->ppdu_tlv_buf;
4536 }
4537
4538 return tlv_desc;
4539 }
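
/*
 * Illustrative sketch (not part of the driver): the pad-short-TLV
 * pattern dp_validate_fix_ppdu_tlv() uses above, written with libc
 * calls instead of the qdf_mem_* wrappers. 'scratch' plays the role of
 * mon_pdev->ppdu_tlv_buf and must be at least 'expected' bytes.
 */
#include <string.h>

static void *example_fix_short_tlv(void *scratch, const void *tlv,
				   size_t expected, size_t len)
{
	if (len >= expected)
		return (void *)tlv;	/* long enough, use in place */

	memset(scratch, 0, expected);	/* zero-pad the tail */
	memcpy(scratch, tlv, len);	/* keep what the FW actually sent */
	return scratch;
}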
4540
4541 /**
4542 * dp_process_ppdu_tag() - Function to process the PPDU TLVs
4543 * @pdev: DP pdev handle
4544 * @tag_buf: TLV buffer
4545 * @tlv_len: length of tlv
4546 * @ppdu_info: per ppdu tlv structure
4547 *
4548 * Return: void
4549 */
4550 static void dp_process_ppdu_tag(struct dp_pdev *pdev,
4551 uint32_t *tag_buf,
4552 uint32_t tlv_len,
4553 struct ppdu_info *ppdu_info)
4554 {
4555 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
4556 uint16_t tlv_expected_size;
4557 uint32_t *tlv_desc;
4558
4559 switch (tlv_type) {
4560 case HTT_PPDU_STATS_COMMON_TLV:
4561 tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
4562 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4563 tlv_expected_size, tlv_len);
4564 dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
4565 break;
4566 case HTT_PPDU_STATS_USR_COMMON_TLV:
4567 tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
4568 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4569 tlv_expected_size, tlv_len);
4570 dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
4571 ppdu_info);
4572 break;
4573 case HTT_PPDU_STATS_USR_RATE_TLV:
4574 tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
4575 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4576 tlv_expected_size, tlv_len);
4577 dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
4578 ppdu_info);
4579 break;
4580 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
4581 tlv_expected_size =
4582 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
4583 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4584 tlv_expected_size, tlv_len);
4585 dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
4586 pdev, tlv_desc, ppdu_info);
4587 break;
4588 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
4589 tlv_expected_size =
4590 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
4591 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4592 tlv_expected_size, tlv_len);
4593 dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
4594 pdev, tlv_desc, ppdu_info);
4595 break;
4596 case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
4597 tlv_expected_size =
4598 sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
4599 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4600 tlv_expected_size, tlv_len);
4601 dp_process_ppdu_stats_user_cmpltn_common_tlv(
4602 pdev, tlv_desc, ppdu_info);
4603 break;
4604 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
4605 tlv_expected_size =
4606 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
4607 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4608 tlv_expected_size, tlv_len);
4609 dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
4610 pdev, tlv_desc, ppdu_info);
4611 break;
4612 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
4613 tlv_expected_size =
4614 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
4615 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4616 tlv_expected_size, tlv_len);
4617 dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
4618 pdev, tlv_desc, ppdu_info);
4619 break;
4620 case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
4621 tlv_expected_size =
4622 sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
4623 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4624 tlv_expected_size, tlv_len);
4625 dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
4626 pdev, tlv_desc, ppdu_info);
4627 break;
4628 case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
4629 tlv_expected_size =
4630 sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
4631 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4632 tlv_expected_size, tlv_len);
4633 dp_process_ppdu_stats_user_common_array_tlv(
4634 pdev, tlv_desc, ppdu_info);
4635 break;
4636 case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
4637 tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
4638 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
4639 tlv_expected_size, tlv_len);
4640 dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
4641 ppdu_info);
4642 break;
4643 case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
4644 dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
4645 break;
4646 default:
4647 break;
4648 }
4649 }
4650
4651 #ifdef WLAN_CONFIG_TELEMETRY_AGENT
4652 static inline
4653 void dp_ppdu_desc_user_airtime_consumption_update(
4654 struct dp_peer *peer,
4655 struct cdp_tx_completion_ppdu_user *user)
4656 {
4657 struct dp_mon_peer *mon_peer = NULL;
4658 uint8_t ac = 0;
4659
4660 mon_peer = peer->monitor_peer;
4661 if (qdf_unlikely(!mon_peer))
4662 return;
4663
4664 ac = TID_TO_WME_AC(user->tid);
4665 DP_STATS_INC(mon_peer, airtime_stats.tx_airtime_consumption[ac].consumption,
4666 user->phy_tx_time_us);
4667 }
4668 #else
4669 static inline
4670 void dp_ppdu_desc_user_airtime_consumption_update(
4671 struct dp_peer *peer,
4672 struct cdp_tx_completion_ppdu_user *user)
4673 { }
4674 #endif
4675
4676 #if defined(WLAN_ATF_ENABLE) || defined(WLAN_CONFIG_TELEMETRY_AGENT)
4677 static void
4678 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
4679 struct dp_peer *peer,
4680 struct cdp_tx_completion_ppdu *ppdu_desc,
4681 struct cdp_tx_completion_ppdu_user *user)
4682 {
4683 uint32_t nss_ru_width_sum = 0;
4684 struct dp_mon_peer *mon_peer = NULL;
4685
4686 if (!pdev || !ppdu_desc || !user || !peer)
4687 return;
4688
4689 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
4690 return;
4691
4692 mon_peer = peer->monitor_peer;
4693 if (qdf_unlikely(!mon_peer))
4694 return;
4695
4696 nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
4697 if (!nss_ru_width_sum)
4698 nss_ru_width_sum = 1;
4699
4700 /*
4701 	 * For a SU PPDU the PHY Tx time belongs entirely to the single user.
4702 	 * For MU-MIMO the PHY Tx time is apportioned per user as follows:
4703 	 * user phy tx time =
4704 	 *        Entire PPDU duration * MU Ratio * OFDMA Ratio
4705 	 * MU Ratio = usr_nss / Sum_of_nss_of_all_users
4706 	 * OFDMA Ratio = usr_ru_width / Sum_of_ru_width_of_all_users
4707 	 * usr_ru_width = ru_end - ru_start + 1
4708 */
4709 if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
4710 user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
4711 } else {
4712 user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
4713 user->nss * user->ru_tones) / nss_ru_width_sum;
4714 }
4715
4716 dp_ppdu_desc_user_airtime_consumption_update(peer, user);
4717 }
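
/*
 * Worked example (assumed numbers) of the per-user PHY Tx time formula
 * above: with phy_ppdu_tx_time_us = 1000, user nss = 2, user ru_tones =
 * 106, usr_nss_sum = 4 and usr_ru_tones_sum = 212, the user's share is
 * 1000 * (2 * 106) / (4 * 212) = 250 us. A minimal sketch:
 */
#include <stdint.h>

static uint32_t example_user_phy_tx_time(uint32_t ppdu_tx_time_us,
					 uint32_t nss, uint32_t ru_tones,
					 uint32_t nss_sum,
					 uint32_t ru_tones_sum)
{
	uint32_t denom = nss_sum * ru_tones_sum;

	if (!denom)
		denom = 1;	/* same divide-by-zero guard as above */
	return (ppdu_tx_time_us * nss * ru_tones) / denom;
}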
4718 #else
4719 static void
4720 dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
4721 struct dp_peer *peer,
4722 struct cdp_tx_completion_ppdu *ppdu_desc,
4723 struct cdp_tx_completion_ppdu_user *user)
4724 {
4725 }
4726 #endif
4727
4728 #ifdef WLAN_SUPPORT_CTRL_FRAME_STATS
4729 static void
4730 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
4731 struct cdp_tx_completion_ppdu_user *user)
4732 {
4733 struct dp_mon_peer *mon_peer = NULL;
4734 uint16_t fc = 0;
4735
4736 if (!pdev || !peer || !user)
4737 return;
4738
4739 mon_peer = peer->monitor_peer;
4740 if (qdf_unlikely(!mon_peer))
4741 return;
4742
4743 if (user->mprot_type) {
4744 DP_STATS_INCC(mon_peer,
4745 tx.rts_success, 1, user->rts_success);
4746 DP_STATS_INCC(mon_peer,
4747 tx.rts_failure, 1, user->rts_failure);
4748 }
4749 fc = user->frame_ctrl;
4750 if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_TYPE_MASK) ==
4751 QDF_IEEE80211_FC0_TYPE_CTL) {
4752 if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
4753 QDF_IEEE80211_FC0_SUBTYPE_VHT_NDP_AN)
4754 DP_STATS_INC(mon_peer, tx.ndpa_cnt, 1);
4755 if ((qdf_cpu_to_le16(fc) & QDF_IEEE80211_FC0_SUBTYPE_MASK) ==
4756 QDF_IEEE80211_FC0_SUBTYPE_BAR)
4757 DP_STATS_INC(mon_peer, tx.bar_cnt, 1);
4758 }
4759 }
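
/*
 * Illustrative sketch (not part of the driver): classifying a frame
 * from the 802.11 frame-control field the way the checks above do.
 * The masks and values are the usual FC0 type/subtype encodings and
 * are stated here as assumptions for this example only.
 */
#include <stdint.h>
#include <stdbool.h>

#define EX_FC0_TYPE_MASK	0x0c
#define EX_FC0_TYPE_CTL		0x04
#define EX_FC0_SUBTYPE_MASK	0xf0
#define EX_FC0_SUBTYPE_BAR	0x80

static bool example_is_bar(uint16_t fc)
{
	/* the low byte of the little-endian frame control holds
	 * the type and subtype fields
	 */
	return ((fc & EX_FC0_TYPE_MASK) == EX_FC0_TYPE_CTL) &&
	       ((fc & EX_FC0_SUBTYPE_MASK) == EX_FC0_SUBTYPE_BAR);
}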
4760 #else
4761 static void
4762 dp_tx_ctrl_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
4763 struct cdp_tx_completion_ppdu_user *user)
4764 {
4765 }
4766 #endif /* WLAN_SUPPORT_CTRL_FRAME_STATS */
4767
4768 void
4769 dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
4770 struct ppdu_info *ppdu_info)
4771 {
4772 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4773 struct dp_peer *peer = NULL;
4774 uint32_t tlv_bitmap_expected;
4775 uint32_t tlv_bitmap_default;
4776 uint16_t i;
4777 uint32_t num_users;
4778 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4779
4780 ppdu_desc = (struct cdp_tx_completion_ppdu *)
4781 qdf_nbuf_data(ppdu_info->nbuf);
4782
4783 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
4784 ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
4785
4786 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
4787 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
4788 mon_pdev->tx_capture_enabled) {
4789 if (ppdu_info->is_ampdu)
4790 tlv_bitmap_expected =
4791 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
4792 ppdu_info->tlv_bitmap);
4793 }
4794
4795 tlv_bitmap_default = tlv_bitmap_expected;
4796
4797 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
4798 num_users = ppdu_desc->bar_num_users;
4799 ppdu_desc->num_users = ppdu_desc->bar_num_users;
4800 } else {
4801 num_users = ppdu_desc->num_users;
4802 }
4803 qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
4804
4805 if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) {
4806 dp_ppdu_desc_get_txmode(ppdu_desc);
4807 dp_pdev_update_deter_stats(pdev, ppdu_desc);
4808 }
4809
4810 for (i = 0; i < num_users; i++) {
4811 ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
4812 ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
4813
4814 peer = dp_peer_get_ref_by_id(pdev->soc,
4815 ppdu_desc->user[i].peer_id,
4816 DP_MOD_ID_TX_PPDU_STATS);
4817 /*
4818 * This check is to make sure peer is not deleted
4819 * after processing the TLVs.
4820 */
4821 if (!peer)
4822 continue;
4823
4824 ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
4825
4826 dp_ppdu_desc_user_phy_tx_time_update(pdev, peer, ppdu_desc,
4827 &ppdu_desc->user[i]);
4828
4829 dp_tx_ctrl_stats_update(pdev, peer, &ppdu_desc->user[i]);
4830
4831 if (wlan_cfg_get_sawf_stats_config(pdev->soc->wlan_cfg_ctx)) {
4832 dp_ppdu_desc_user_deter_stats_update(pdev,
4833 peer,
4834 ppdu_desc,
4835 &ppdu_desc->user[i]);
4836 }
4837
4838 /*
4839 		 * Different frame types (DATA, BAR, CTRL) have different
4840 		 * expected TLV bitmaps. Apart from the ACK_BA_STATUS TLV,
4841 		 * all TLVs arrive in order from the FW. The ACK_BA_STATUS
4842 		 * TLV comes from hardware and is asynchronous, so we must
4843 		 * depend on some TLV to confirm that all TLVs for a PPDU
4844 		 * have been received. We therefore depend on both
4845 		 * SCHED_CMD_STATUS_TLV and ACK_BA_STATUS_TLV; for failed
4846 		 * packets no ACK_BA_STATUS_TLV is sent.
4848 */
4849 if (!(ppdu_info->tlv_bitmap &
4850 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
4851 (!(ppdu_info->tlv_bitmap &
4852 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
4853 (ppdu_desc->user[i].completion_status ==
4854 HTT_PPDU_STATS_USER_STATUS_OK))) {
4855 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4856 continue;
4857 }
4858
4859 /*
4860 		 * Update Tx stats for data frames with a QoS TID as well
4861 		 * as the non-QoS data TID
4862 */
4863
4864 if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
4865 (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
4866 (ppdu_desc->htt_frame_type ==
4867 HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
4868 ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
4869 (ppdu_desc->num_mpdu > 1))) &&
4870 (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
4871 dp_tx_stats_update(pdev, peer,
4872 &ppdu_desc->user[i],
4873 ppdu_desc);
4874 }
4875
4876 dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
4877 tlv_bitmap_expected = tlv_bitmap_default;
4878 }
4879 }
4880
4881 #if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_PKT_CAPTURE_TX_2_0) || \
4882 defined(WLAN_PKT_CAPTURE_RX_2_0)
4883 /**
4884 * dp_tx_ppdu_desc_notify() - Notify the upper layer about a PPDU via WDI
4885 *
4886 * @pdev: Datapath pdev handle
4887 * @nbuf: Buffer to be delivered to upper layer
4888 *
4889 * Return: void
4890 */
4891 static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
4892 {
4893 struct dp_soc *soc = pdev->soc;
4894 struct dp_mon_ops *mon_ops = NULL;
4895
4896 mon_ops = dp_mon_ops_get(soc);
4897 if (mon_ops && mon_ops->mon_ppdu_desc_notify)
4898 mon_ops->mon_ppdu_desc_notify(pdev, nbuf);
4899 else
4900 qdf_nbuf_free(nbuf);
4901 }
4902
4903 void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
4904 struct ppdu_info *ppdu_info)
4905 {
4906 struct ppdu_info *s_ppdu_info = NULL;
4907 struct ppdu_info *ppdu_info_next = NULL;
4908 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
4909 qdf_nbuf_t nbuf;
4910 uint32_t time_delta = 0;
4911 bool starved = 0;
4912 bool matched = 0;
4913 bool recv_ack_ba_done = 0;
4914 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
4915
4916 if (ppdu_info->tlv_bitmap &
4917 (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
4918 ppdu_info->done)
4919 recv_ack_ba_done = 1;
4920
4921 mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
4922
4923 s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);
4924
4925 TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
4926 ppdu_info_list_elem, ppdu_info_next) {
4927 if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
4928 time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
4929 ppdu_info->tsf_l32;
4930 else
4931 time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
4932
4933 if (!s_ppdu_info->done && !recv_ack_ba_done) {
4934 if (time_delta < MAX_SCHED_STARVE) {
4935 dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
4936 pdev->pdev_id,
4937 s_ppdu_info->ppdu_id,
4938 s_ppdu_info->sched_cmdid,
4939 s_ppdu_info->tlv_bitmap,
4940 s_ppdu_info->tsf_l32,
4941 s_ppdu_info->done);
4942 break;
4943 }
4944 starved = 1;
4945 }
4946
4947 mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
4948 TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
4949 ppdu_info_list_elem);
4950 mon_pdev->sched_comp_list_depth--;
4951
4952 nbuf = s_ppdu_info->nbuf;
4953 qdf_assert_always(nbuf);
4954 ppdu_desc = (struct cdp_tx_completion_ppdu *)
4955 qdf_nbuf_data(nbuf);
4956 ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
4957
4958 if (starved) {
4959 dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
4960 ppdu_desc->frame_ctrl,
4961 ppdu_desc->htt_frame_type,
4962 ppdu_desc->tlv_bitmap,
4963 ppdu_desc->user[0].completion_status);
4964 starved = 0;
4965 }
4966
4967 if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
4968 ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
4969 matched = 1;
4970
4971 dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
4972
4973 qdf_mem_free(s_ppdu_info);
4974
4975 dp_tx_ppdu_desc_notify(pdev, nbuf);
4976
4977 if (matched)
4978 break;
4979 }
4980 }
4981 #endif
4982
4983 /**
4984 * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer
4985 * @pdev: Datapath pdev handle
4986 * @ppdu_info: per PPDU TLV descriptor
4987 *
4988 * Return: void
4989 */
4990 static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev,
4991 struct ppdu_info *ppdu_info)
4992 {
4993 struct dp_soc *soc = pdev->soc;
4994 struct dp_mon_ops *mon_ops = NULL;
4995
4996 mon_ops = dp_mon_ops_get(soc);
4997
4998 if (mon_ops && mon_ops->mon_ppdu_desc_deliver) {
4999 mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info);
5000 } else {
5001 qdf_nbuf_free(ppdu_info->nbuf);
5002 ppdu_info->nbuf = NULL;
5003 qdf_mem_free(ppdu_info);
5004 }
5005 }
5006
5007 /**
5008 * dp_get_ppdu_desc() - Function to allocate new PPDU status
5009 * desc for new ppdu id
5010 * @pdev: DP pdev handle
5011 * @ppdu_id: PPDU unique identifier
5012 * @tlv_type: TLV type received
5013 * @tsf_l32: timestamp received along with ppdu stats indication header
5014 * @max_users: Maximum number of users for that particular ppdu
5015 *
5016 * Return: ppdu_info per ppdu tlv structure
5017 */
5018 static
5019 struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
5020 uint8_t tlv_type, uint32_t tsf_l32,
5021 uint8_t max_users)
5022 {
5023 struct ppdu_info *ppdu_info = NULL;
5024 struct ppdu_info *s_ppdu_info = NULL;
5025 struct ppdu_info *ppdu_info_next = NULL;
5026 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
5027 uint32_t size = 0;
5028 struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
5029 struct cdp_tx_completion_ppdu_user *tmp_user;
5030 uint32_t time_delta;
5031 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5032
5033 /*
5034 	 * Check whether a node for this ppdu_id already exists
5035 */
5036 TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
5037 ppdu_info_list_elem, ppdu_info_next) {
5038 if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
5039 if (ppdu_info->tsf_l32 > tsf_l32)
5040 time_delta = (MAX_TSF_32 -
5041 ppdu_info->tsf_l32) + tsf_l32;
5042 else
5043 time_delta = tsf_l32 - ppdu_info->tsf_l32;
5044
5045 if (time_delta > WRAP_DROP_TSF_DELTA) {
5046 TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
5047 ppdu_info, ppdu_info_list_elem);
5048 mon_pdev->list_depth--;
5049 pdev->stats.ppdu_wrap_drop++;
5050 tmp_ppdu_desc =
5051 (struct cdp_tx_completion_ppdu *)
5052 qdf_nbuf_data(ppdu_info->nbuf);
5053 tmp_user = &tmp_ppdu_desc->user[0];
5054 dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
5055 ppdu_info->ppdu_id,
5056 ppdu_info->tsf_l32,
5057 ppdu_info->tlv_bitmap,
5058 tmp_user->completion_status,
5059 ppdu_info->compltn_common_tlv,
5060 ppdu_info->ack_ba_tlv,
5061 ppdu_id, tsf_l32,
5062 tlv_type);
5063 qdf_nbuf_free(ppdu_info->nbuf);
5064 ppdu_info->nbuf = NULL;
5065 qdf_mem_free(ppdu_info);
5066 } else {
5067 break;
5068 }
5069 }
5070 }
5071
5072 /*
5073 	 * If this is an ACK BA TLV and no match was found in the ppdu
5074 	 * info list, look it up in the sched completion ppdu list.
5075 */
5076 if (!ppdu_info &&
5077 tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
5078 TAILQ_FOREACH(s_ppdu_info,
5079 &mon_pdev->sched_comp_ppdu_list,
5080 ppdu_info_list_elem) {
5081 if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
5082 if (s_ppdu_info->tsf_l32 > tsf_l32)
5083 time_delta = (MAX_TSF_32 -
5084 s_ppdu_info->tsf_l32) +
5085 tsf_l32;
5086 else
5087 time_delta = tsf_l32 -
5088 s_ppdu_info->tsf_l32;
5089 if (time_delta < WRAP_DROP_TSF_DELTA) {
5090 ppdu_info = s_ppdu_info;
5091 break;
5092 }
5093 } else {
5094 /*
5095 				 * ACK BA STATUS TLVs arrive in sequential
5096 				 * order. If we received the ACK BA STATUS
5097 				 * TLV for a later ppdu while an earlier ppdu
5098 				 * is still waiting for its TLV, then per FW
5099 				 * it will not arrive later, so mark that
5100 				 * ppdu info done.
5101 */
5102 if (s_ppdu_info)
5103 s_ppdu_info->done = 1;
5104 }
5105 }
5106 }
5107
5108 if (ppdu_info) {
5109 if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
5110 /*
5111 			 * If we get a tlv_type that has already been
5112 			 * processed for this ppdu, a new ppdu arrived with
5113 			 * the same ppdu id, so flush the older one. For
5114 			 * MU-MIMO and OFDMA a PPDU has multiple users with
5115 			 * the same TLV types; the TLV bitmap is used to
5116 			 * distinguish SU from MU-MIMO/OFDMA.
5117 */
5118 if (!(ppdu_info->tlv_bitmap &
5119 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
5120 return ppdu_info;
5121
5122 ppdu_desc = (struct cdp_tx_completion_ppdu *)
5123 qdf_nbuf_data(ppdu_info->nbuf);
5124
5125 /*
5126 			 * Apart from the ACK BA STATUS TLV, everything
5127 			 * arrives in order, so if the TLV type is not ACK BA
5128 			 * STATUS we can deliver the ppdu_info
5129 */
5130 if ((tlv_type ==
5131 HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
5132 ((ppdu_desc->htt_frame_type ==
5133 HTT_STATS_FTYPE_SGEN_MU_BAR) ||
5134 (ppdu_desc->htt_frame_type ==
5135 HTT_STATS_FTYPE_SGEN_BE_MU_BAR)))
5136 return ppdu_info;
5137
5138 dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
5139 } else {
5140 return ppdu_info;
5141 }
5142 }
5143
5144 /*
5145 	 * Flush the head ppdu descriptor if the ppdu desc list reaches
5146 	 * its maximum depth
5147 */
5148 if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
5149 ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
5150 TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
5151 ppdu_info, ppdu_info_list_elem);
5152 mon_pdev->list_depth--;
5153 pdev->stats.ppdu_drop++;
5154 qdf_nbuf_free(ppdu_info->nbuf);
5155 ppdu_info->nbuf = NULL;
5156 qdf_mem_free(ppdu_info);
5157 }
5158
5159 size = sizeof(struct cdp_tx_completion_ppdu) +
5160 (max_users * sizeof(struct cdp_tx_completion_ppdu_user));
5161
5162 /*
5163 * Allocate new ppdu_info node
5164 */
5165 ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
5166 if (!ppdu_info)
5167 return NULL;
5168
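/*
 * Allocate the descriptor nbuf; the 0 and 4 arguments below request
 * no reserved headroom and 4-byte alignment.
 */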
5169 ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
5170 0, 4, TRUE);
5171 if (!ppdu_info->nbuf) {
5172 qdf_mem_free(ppdu_info);
5173 return NULL;
5174 }
5175
5176 ppdu_info->ppdu_desc =
5177 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
5178 qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
5179
5180 if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) {
5181 dp_mon_err("No tailroom for HTT PPDU");
5182 qdf_nbuf_free(ppdu_info->nbuf);
5183 ppdu_info->nbuf = NULL;
5184 ppdu_info->last_user = 0;
5185 qdf_mem_free(ppdu_info);
5186 return NULL;
5187 }
5188
5189 ppdu_info->ppdu_desc->max_users = max_users;
5190 ppdu_info->tsf_l32 = tsf_l32;
5191 /*
5192 * No lock is needed because all PPDU TLVs are processed in
5193 * the same context, and this list is updated in that context.
5194 */
5195 TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
5196 ppdu_info_list_elem);
5197 mon_pdev->list_depth++;
5198 return ppdu_info;
5199 }
5200
5201 #define DP_HTT_PPDU_ID_MASK 0x00FFFFFF
5202 /**
5203 * dp_htt_mask_ppdu_id() - Function to mask ppdu_id
5204 * @ppdu_id: PPDU ID
5205 *
5206 * Return: Masked ppdu_id
5207 */
5208 static inline uint32_t dp_htt_mask_ppdu_id(uint32_t ppdu_id)
5209 {
5210 return (ppdu_id & DP_HTT_PPDU_ID_MASK);
5211 }
5212
5213 /**
5214 * dp_htt_process_tlv() - Function to process each PPDU TLVs
5215 * @pdev: DP pdev handle
5216 * @htt_t2h_msg: HTT target to host message
5217 *
5218 * Return: ppdu_info structure when the PPDU is ready to be delivered, else NULL
5219 */
5220 static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
5221 qdf_nbuf_t htt_t2h_msg)
5222 {
5223 uint32_t length;
5224 uint32_t ppdu_id;
5225 uint8_t tlv_type;
5226 uint32_t tlv_length, tlv_bitmap_expected;
5227 uint8_t *tlv_buf;
5228 struct ppdu_info *ppdu_info = NULL;
5229 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
5230 uint8_t max_users = CDP_MU_MAX_USERS;
5231 uint32_t tsf_l32;
5232 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5233
5234 uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
5235
5236 length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
5237
5238 msg_word = msg_word + 1;
5239 ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
5240 ppdu_id = dp_htt_mask_ppdu_id(ppdu_id);
5241
5242 msg_word = msg_word + 1;
5243 tsf_l32 = (uint32_t)(*msg_word);
5244
5245 msg_word = msg_word + 2;
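/*
 * msg_word now points at the first TLV. Each loop iteration reads
 * the TLV header word (tag and length), accounts the TLV type in
 * the pdev stats, and advances by the TLV length plus
 * HTT_TLV_HDR_LEN.
 */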
5246 while (length > 0) {
5247 tlv_buf = (uint8_t *)msg_word;
5248 tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
5249 tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
5250 if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
5251 pdev->stats.ppdu_stats_counter[tlv_type]++;
5252
5253 if (tlv_length == 0)
5254 break;
5255
5256 tlv_length += HTT_TLV_HDR_LEN;
5257
5258 /*
5259 * No separate ppdu descriptor is allocated for the MGMT
5260 * payload TLV, as it is sent as a separate WDI indication
5261 * and does not contain any PPDU information.
5262 */
5263 if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
5264 mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
5265 mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
5266 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
5267 HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
5268 (*(msg_word + 1));
5269 msg_word =
5270 (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5271 length -= (tlv_length);
5272 continue;
5273 }
5274
5275 /*
5276 * Retrieve max_users from the TLV if it is USERS_INFO;
5277 * use 1 for COMPLTN_FLUSH;
5278 * otherwise default to CDP_MU_MAX_USERS.
5279 */
5280 if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
5281 max_users =
5282 HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
5283 } else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
5284 max_users = 1;
5285 }
5286
5287 ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
5288 tsf_l32, max_users);
5289 if (!ppdu_info)
5290 return NULL;
5291
5292 ppdu_info->ppdu_id = ppdu_id;
5293 ppdu_info->tlv_bitmap |= (1 << tlv_type);
5294
5295 dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
5296
5297 /*
5298 * Increment pdev level tlv count to monitor
5299 * missing TLVs
5300 */
5301 mon_pdev->tlv_count++;
5302 ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
5303 msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5304 length -= (tlv_length);
5305 }
5306
5307 if (!ppdu_info)
5308 return NULL;
5309
5310 mon_pdev->last_ppdu_id = ppdu_id;
5311
5312 tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
5313
5314 if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
5315 mon_pdev->tx_capture_enabled) {
5316 if (ppdu_info->is_ampdu)
5317 tlv_bitmap_expected =
5318 dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
5319 ppdu_info->tlv_bitmap);
5320 }
5321
5322 ppdu_desc = ppdu_info->ppdu_desc;
5323
5324 if (!ppdu_desc)
5325 return NULL;
5326
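/*
 * If the last user's completion status is not OK, restrict the
 * expected bitmap to the low-byte TLV types, presumably because the
 * FW does not send the later TLVs for failed completions.
 */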
5327 if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
5328 HTT_PPDU_STATS_USER_STATUS_OK) {
5329 tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
5330 }
5331
5332 /*
5333 * For frame types DATA and BAR we update stats per MSDU.
5334 * Successful MSDU and MPDU counts are populated from the ACK BA
5335 * STATUS TLV, which arrives out of order; the successful MPDU
5336 * count is also populated from the COMPLTN COMMON TLV, which
5337 * arrives in order. For every ppdu_info we store the successful
5338 * MPDU count from both TLVs and compare them before delivering,
5339 * to make sure the ACK BA STATUS TLV was received. Self-generated
5340 * frames get no ACK BA STATUS TLV, so we need not wait for one.
5341 */
5342 if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
5343 ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
5344 /*
5345 * Most of the time a BAR frame carries a duplicate ACK BA
5346 * STATUS TLV.
5347 */
5348 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
5349 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
5350 return NULL;
5351 /*
5352 * For data frames, the COMPLTN COMMON TLV should match the ACK
5353 * BA STATUS TLV and the completion status. We check the first
5354 * user only: for OFDMA, completion is seen at the next MU BAR
5355 * frame; for MIMO, only the first user's completion is immediate.
5356 */
5357 if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
5358 (ppdu_desc->user[0].completion_status == 0 &&
5359 (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
5360 return NULL;
5361 }
5362
5363 /*
5364 * Once all the TLVs for a given PPDU have been processed,
5365 * return the PPDU status to be delivered to the higher layer.
5366 * tlv_bitmap_expected cannot cover every frame type, but the
5367 * SCHED CMD STATUS TLV is the last TLV the FW sends for a PPDU,
5368 * and apart from the ACK BA TLV the FW sends the other TLVs in
5369 * sequential order. The flush TLV comes separately.
5370 */
5371 if ((ppdu_info->tlv_bitmap != 0 &&
5372 (ppdu_info->tlv_bitmap &
5373 (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
5374 (ppdu_info->tlv_bitmap &
5375 (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
5376 ppdu_info->done = 1;
5377 return ppdu_info;
5378 }
5379
5380 return NULL;
5381 }
5382 #endif /* QCA_ENHANCED_STATS_SUPPORT */
5383
5384 #ifdef QCA_ENHANCED_STATS_SUPPORT
5385 /**
5386 * dp_tx_ppdu_stats_feat_enable_check() - Check if any feature is enabled to
5387 * consume PPDU stats received from FW via HTT
5388 * @pdev: Datapath pdev handle
5389 *
5390 * Return: true if at least one consuming feature is enabled, else false
5391 */
5392 static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev)
5393 {
5394 struct dp_soc *soc = pdev->soc;
5395 struct dp_mon_ops *mon_ops = NULL;
5396
5397 mon_ops = dp_mon_ops_get(soc);
5398 if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check)
5399 return mon_ops->mon_ppdu_stats_feat_enable_check(pdev);
5400 else
5401 return false;
5402 }
5403 #endif
5404
5405 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
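/**
 * dp_htt_process_smu_ppdu_stats_tlv() - Walk the PPDU stats TLVs and
 * forward each PPDU-stats-for-SMU TLV to the packet capture consumer
 * via a WDI event
 * @soc: DP SOC handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Return: void
 */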
5406 static void dp_htt_process_smu_ppdu_stats_tlv(struct dp_soc *soc,
5407 qdf_nbuf_t htt_t2h_msg)
5408 {
5409 uint32_t length;
5410 uint8_t tlv_type;
5411 uint32_t tlv_length, tlv_expected_size;
5412 uint8_t *tlv_buf;
5413
5414 uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
5415
5416 length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
5417
5418 msg_word = msg_word + 4;
5419
5420 while (length > 0) {
5421 tlv_buf = (uint8_t *)msg_word;
5422 tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
5423 tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
5424
5425 if (tlv_length == 0)
5426 break;
5427
5428 tlv_length += HTT_TLV_HDR_LEN;
5429
5430 if (tlv_type == HTT_PPDU_STATS_FOR_SMU_TLV) {
5431 tlv_expected_size = sizeof(htt_ppdu_stats_for_smu_tlv);
5432
5433 if (tlv_length >= tlv_expected_size)
5434 dp_wdi_event_handler(
5435 WDI_EVENT_PKT_CAPTURE_PPDU_STATS,
5436 soc, msg_word, HTT_INVALID_VDEV,
5437 WDI_NO_VAL, 0);
5438 }
5439 msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
5440 length -= (tlv_length);
5441 }
5442 }
5443 #endif
5444
5445 #if defined(WDI_EVENT_ENABLE)
5446 #ifdef QCA_ENHANCED_STATS_SUPPORT
5447 /**
5448 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
5449 * @soc: DP SOC handle
5450 * @pdev_id: pdev id
5451 * @htt_t2h_msg: HTT message nbuf
5452 *
5453 * Return: true if the message buffer can be freed by the caller, else false
5454 */
5455 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5456 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5457 {
5458 struct dp_pdev *pdev;
5459 struct ppdu_info *ppdu_info = NULL;
5460 bool free_buf = true;
5461 struct dp_mon_pdev *mon_pdev;
5462
5463 if (pdev_id >= MAX_PDEV_CNT)
5464 return true;
5465
5466 pdev = soc->pdev_list[pdev_id];
5467 if (!pdev)
5468 return true;
5469
5470 mon_pdev = pdev->monitor_pdev;
5471 if (!mon_pdev)
5472 return true;
5473
5474 if (!dp_tx_ppdu_stats_feat_enable_check(pdev))
5475 return free_buf;
5476
5477 qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);
5478 ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
5479
5480 if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
5481 if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
5482 (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
5483 QDF_STATUS_SUCCESS)
5484 free_buf = false;
5485 }
5486
5487 if (ppdu_info)
5488 dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
5489
5490 mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
5491 mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
5492 mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;
5493
5494 qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);
5495
5496 return free_buf;
5497 }
5498 #elif defined(WLAN_FEATURE_PKT_CAPTURE_V2)
5499 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5500 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5501 {
5502 if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
5503 dp_htt_process_smu_ppdu_stats_tlv(soc, htt_t2h_msg);
5504
5505 return true;
5506 }
5507 #elif (!defined(REMOVE_PKT_LOG))
5508 static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
5509 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
5510 {
5511 return true;
5512 }
5513 #endif /* QCA_ENHANCED_STATS_SUPPORT */
5514 #endif
5515
5516 #if defined(WDI_EVENT_ENABLE) &&\
5517 (defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG) || \
5518 defined(WLAN_FEATURE_PKT_CAPTURE_V2))
5519 bool
5520 dp_ppdu_stats_ind_handler(struct htt_soc *soc,
5521 uint32_t *msg_word,
5522 qdf_nbuf_t htt_t2h_msg)
5523 {
5524 u_int8_t pdev_id;
5525 u_int8_t target_pdev_id;
5526 bool free_buf;
5527
5528 target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
5529 pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
5530 target_pdev_id);
5531 dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
5532 htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
5533 pdev_id);
5534
5535 free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
5536 htt_t2h_msg);
5537
5538 return free_buf;
5539 }
5540 #endif
5541
5542 void
5543 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
5544 {
5545 pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
5546 }
5547
5548 bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
5549 {
5550 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5551 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5552
5553 if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
5554 (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
5555 return true;
5556
5557 return false;
5558 }
5559
5560 bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
5561 {
5562 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5563 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5564
5565 if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
5566 (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
5567 return true;
5568
5569 return false;
5570 }
5571
5572 bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
5573 {
5574 struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5575 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5576
5577 if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
5578 (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
5579 if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
5580 (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
5581 return true;
5582 }
5583 }
5584
5585 return false;
5586 }
5587
5588 QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
5589 {
5590 int target_type;
5591 struct dp_mon_soc *mon_soc = soc->monitor_soc;
5592 struct cdp_mon_ops *cdp_ops;
5593
5594 cdp_ops = dp_mon_cdp_ops_get(soc);
5595 target_type = hal_get_target_type(soc->hal_soc);
5596 switch (target_type) {
5597 case TARGET_TYPE_QCA6290:
5598 case TARGET_TYPE_QCA6390:
5599 case TARGET_TYPE_QCA6490:
5600 case TARGET_TYPE_QCA6750:
5601 case TARGET_TYPE_KIWI:
5602 case TARGET_TYPE_MANGO:
5603 case TARGET_TYPE_PEACH:
5604 case TARGET_TYPE_WCN6450:
5605 /* do nothing */
5606 break;
5607 case TARGET_TYPE_QCA8074:
5608 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5609 MON_BUF_MIN_ENTRIES);
5610 break;
5611 case TARGET_TYPE_QCA8074V2:
5612 case TARGET_TYPE_QCA6018:
5613 case TARGET_TYPE_QCA9574:
5614 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5615 MON_BUF_MIN_ENTRIES);
5616 mon_soc->hw_nac_monitor_support = 1;
5617 break;
5618 case TARGET_TYPE_QCN9000:
5619 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5620 MON_BUF_MIN_ENTRIES);
5621 mon_soc->hw_nac_monitor_support = 1;
5622 if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) {
5623 if (cdp_ops && cdp_ops->config_full_mon_mode)
5624 cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1);
5625 }
5626 break;
5627 case TARGET_TYPE_QCA5018:
5628 case TARGET_TYPE_QCN6122:
5629 case TARGET_TYPE_QCN9160:
5630 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5631 MON_BUF_MIN_ENTRIES);
5632 mon_soc->hw_nac_monitor_support = 1;
5633 break;
5634 case TARGET_TYPE_QCN9224:
5635 case TARGET_TYPE_QCA5332:
5636 case TARGET_TYPE_QCN6432:
5637 wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
5638 MON_BUF_MIN_ENTRIES);
5639 mon_soc->hw_nac_monitor_support = 1;
5640 mon_soc->monitor_mode_v2 = 1;
5641 break;
5642 default:
5643 dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type);
5644 qdf_assert_always(0);
5645 break;
5646 }
5647
5648 dp_mon_info("hw_nac_monitor_support = %d",
5649 mon_soc->hw_nac_monitor_support);
5650
5651 return QDF_STATUS_SUCCESS;
5652 }
5653
5654 /**
5655 * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration
5656 * @pdev: PDEV handle [Should be valid]
5657 *
5658 * Return: None
5659 */
5660 static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev)
5661 {
5662 struct dp_soc *soc = pdev->soc;
5663 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
5664 int target_type;
5665
5666 target_type = hal_get_target_type(soc->hal_soc);
5667 switch (target_type) {
5668 case TARGET_TYPE_KIWI:
5669 case TARGET_TYPE_QCN9224:
5670 case TARGET_TYPE_QCA5332:
5671 case TARGET_TYPE_QCN6432:
5672 case TARGET_TYPE_MANGO:
5673 mon_pdev->is_tlv_hdr_64_bit = true;
5674 mon_pdev->tlv_hdr_size = HAL_RX_TLV64_HDR_SIZE;
5675 break;
5676 case TARGET_TYPE_PEACH:
5677 default:
5678 mon_pdev->is_tlv_hdr_64_bit = false;
5679 mon_pdev->tlv_hdr_size = HAL_RX_TLV32_HDR_SIZE;
5680 break;
5681 }
5682 }
5683
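/**
 * dp_mon_rings_alloc() - Allocate the monitor rings of a pdev
 * @pdev: DP pdev handle
 *
 * Invokes both registered ring-alloc ops; going by the registration
 * in dp_mon_register_lpc_ops_1_0(), index 0 holds the monitor 1.0
 * ops and index 1 the monitor 2.0 ops.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */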
5684 static
5685 QDF_STATUS dp_mon_rings_alloc(struct dp_pdev *pdev)
5686 {
5687 QDF_STATUS status = QDF_STATUS_SUCCESS;
5688 struct dp_mon_ops *mon_ops;
5689
5690 mon_ops = dp_mon_ops_get(pdev->soc);
5691 if (!mon_ops) {
5692 dp_mon_err("mon_ops is NULL");
5693 return QDF_STATUS_E_FAILURE;
5694 }
5695
5696 if (mon_ops->mon_rings_alloc[0]) {
5697 status = mon_ops->mon_rings_alloc[0](pdev);
5698 if (QDF_IS_STATUS_ERROR(status)) {
5699 dp_mon_err("error: %d", status);
5700 goto error;
5701 }
5702 }
5703
5704 if (mon_ops->mon_rings_alloc[1]) {
5705 status = mon_ops->mon_rings_alloc[1](pdev);
5706 if (QDF_IS_STATUS_ERROR(status)) {
5707 dp_mon_err("error: %d", status);
5708 goto error;
5709 }
5710 }
5711
5712 error:
5713 return status;
5714 }
5715
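/**
 * dp_mon_rings_free() - Free the monitor rings of a pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */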
5716 static
5717 void dp_mon_rings_free(struct dp_pdev *pdev)
5718 {
5719 struct dp_mon_ops *mon_ops;
5720
5721 mon_ops = dp_mon_ops_get(pdev->soc);
5722 if (!mon_ops) {
5723 dp_mon_err("mon_ops is NULL");
5724 return;
5725 }
5726
5727 if (mon_ops->mon_rings_free[0])
5728 mon_ops->mon_rings_free[0](pdev);
5729
5730 if (mon_ops->mon_rings_free[1])
5731 mon_ops->mon_rings_free[1](pdev);
5732 }
5733
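/**
 * dp_mon_rings_init() - Initialize the monitor rings of a pdev
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */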
5734 static
5735 QDF_STATUS dp_mon_rings_init(struct dp_pdev *pdev)
5736 {
5737 QDF_STATUS status = QDF_STATUS_SUCCESS;
5738 struct dp_mon_ops *mon_ops;
5739
5740 mon_ops = dp_mon_ops_get(pdev->soc);
5741 if (!mon_ops) {
5742 dp_mon_err("mon_ops is NULL");
5743 return QDF_STATUS_E_FAILURE;
5744 }
5745
5746 if (mon_ops->mon_rings_init[0]) {
5747 status = mon_ops->mon_rings_init[0](pdev);
5748 if (QDF_IS_STATUS_ERROR(status)) {
5749 dp_mon_err("error: %d", status);
5750 goto error;
5751 }
5752 }
5753
5754 if (mon_ops->mon_rings_init[1]) {
5755 status = mon_ops->mon_rings_init[1](pdev);
5756 if (QDF_IS_STATUS_ERROR(status)) {
5757 dp_mon_err("error: %d", status);
5758 goto error;
5759 }
5760 }
5761
5762 error:
5763 return status;
5764 }
5765
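/**
 * dp_mon_rings_deinit() - De-initialize the monitor rings of a pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */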
5766 static
5767 void dp_mon_rings_deinit(struct dp_pdev *pdev)
5768 {
5769 struct dp_mon_ops *mon_ops;
5770
5771 mon_ops = dp_mon_ops_get(pdev->soc);
5772 if (!mon_ops) {
5773 dp_mon_err("mon_ops is NULL");
5774 return;
5775 }
5776
5777 if (mon_ops->mon_rings_deinit[0])
5778 mon_ops->mon_rings_deinit[0](pdev);
5779
5780 if (mon_ops->mon_rings_deinit[1])
5781 mon_ops->mon_rings_deinit[1](pdev);
5782 }
5783
5784 QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
5785 {
5786 struct dp_soc *soc;
5787 struct dp_mon_pdev *mon_pdev;
5788 struct dp_mon_ops *mon_ops;
5789 qdf_size_t mon_pdev_context_size;
5790
5791 if (!pdev) {
5792 dp_mon_err("pdev is NULL");
5793 goto fail0;
5794 }
5795
5796 soc = pdev->soc;
5797
5798 mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV);
5799 mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size);
5800 if (!mon_pdev) {
5801 dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
5802 goto fail0;
5803 }
5804
5805 pdev->monitor_pdev = mon_pdev;
5806 mon_ops = dp_mon_ops_get(pdev->soc);
5807 if (!mon_ops) {
5808 dp_mon_err("%pK: Invalid monitor ops", pdev);
5809 goto fail1;
5810 }
5811
5812 if (mon_ops->mon_pdev_alloc) {
5813 if (mon_ops->mon_pdev_alloc(pdev)) {
5814 dp_mon_err("%pK: MONITOR pdev alloc failed", pdev);
5815 goto fail1;
5816 }
5817 }
5818
5819 if (dp_mon_rings_alloc(pdev)) {
5820 dp_mon_err("%pK: MONITOR rings setup failed", pdev);
5821 goto fail2;
5822 }
5823
5824 /* Rx monitor mode specific init */
5825 if (mon_ops->rx_mon_desc_pool_alloc) {
5826 if (mon_ops->rx_mon_desc_pool_alloc(pdev)) {
5827 dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
5828 goto fail3;
5829 }
5830 }
5831
5832 if (mon_ops->mon_rx_ppdu_info_cache_create) {
5833 if (mon_ops->mon_rx_ppdu_info_cache_create(pdev)) {
5834 dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
5835 goto fail4;
5836 }
5837 }
5838 pdev->monitor_pdev = mon_pdev;
5839 dp_mon_pdev_per_target_config(pdev);
5840
5841 return QDF_STATUS_SUCCESS;
5842 fail4:
5843 if (mon_ops->rx_mon_desc_pool_free)
5844 mon_ops->rx_mon_desc_pool_free(pdev);
5845 fail3:
5846 dp_mon_rings_free(pdev);
5847 fail2:
5848 if (mon_ops->mon_pdev_free)
5849 mon_ops->mon_pdev_free(pdev);
5850 fail1:
5851 pdev->monitor_pdev = NULL;
5852 dp_context_free_mem(soc, DP_MON_PDEV_TYPE, mon_pdev);
5853 fail0:
5854 return QDF_STATUS_E_NOMEM;
5855 }
5856
5857 QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
5858 {
5859 struct dp_mon_pdev *mon_pdev;
5860 struct dp_mon_ops *mon_ops = NULL;
5861
5862 if (!pdev) {
5863 dp_mon_err("pdev is NULL");
5864 return QDF_STATUS_E_FAILURE;
5865 }
5866
5867 mon_pdev = pdev->monitor_pdev;
5868 if (!mon_pdev) {
5869 dp_mon_err("Monitor pdev is NULL");
5870 return QDF_STATUS_E_FAILURE;
5871 }
5872
5873 mon_ops = dp_mon_ops_get(pdev->soc);
5874 if (!mon_ops) {
5875 dp_mon_err("Monitor ops is NULL");
5876 return QDF_STATUS_E_FAILURE;
5877 }
5878
5879 if (mon_ops->mon_rx_ppdu_info_cache_destroy)
5880 mon_ops->mon_rx_ppdu_info_cache_destroy(pdev);
5881 if (mon_ops->rx_mon_desc_pool_free)
5882 mon_ops->rx_mon_desc_pool_free(pdev);
5883 dp_mon_rings_free(pdev);
5884 if (mon_ops->mon_pdev_free)
5885 mon_ops->mon_pdev_free(pdev);
5886
5887 dp_context_free_mem(pdev->soc, DP_MON_PDEV_TYPE, mon_pdev);
5888 pdev->monitor_pdev = NULL;
5889 return QDF_STATUS_SUCCESS;
5890 }
5891
5892 #ifdef WLAN_TX_PKT_CAPTURE_ENH
5893 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops)
5894 {
5895 mon_ops->mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_1_0;
5896 mon_ops->mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_1_0;
5897 mon_ops->mon_peer_tx_capture_filter_check =
5898 dp_peer_tx_capture_filter_check_1_0;
5899 }
5900 #elif defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(WLAN_FEATURE_LOCAL_PKT_CAPTURE)
5901 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops)
5902 {
5903 mon_ops->mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach_2_0;
5904 mon_ops->mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach_2_0;
5905 mon_ops->mon_peer_tx_capture_filter_check = NULL;
5906 }
5907 #elif (defined(WIFI_MONITOR_SUPPORT) && !defined(WLAN_TX_PKT_CAPTURE_ENH))
5908 void dp_mon_register_tx_pkt_enh_ops_1_0(struct dp_mon_ops *mon_ops)
5909 {
5910 mon_ops->mon_tx_ppdu_stats_attach = NULL;
5911 mon_ops->mon_tx_ppdu_stats_detach = NULL;
5912 mon_ops->mon_peer_tx_capture_filter_check = NULL;
5913 }
5914 #endif
5915
5916 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
5917 #if !defined(DISABLE_MON_CONFIG)
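/*
 * With local packet capture enabled, both monitor generations are
 * wired up below: pdev SRNG setup for 1.0 (index 0) and 2.0
 * (index 1), plus the 2.0 SOC-level SRNG setup.
 */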
5918 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5919 {
5920 mon_ops->mon_pdev_htt_srng_setup[0] = dp_mon_htt_srng_setup_1_0;
5921 mon_ops->mon_pdev_htt_srng_setup[1] = dp_mon_pdev_htt_srng_setup_2_0;
5922 mon_ops->mon_soc_htt_srng_setup = dp_mon_soc_htt_srng_setup_2_0;
5923 }
5924 #else
5925 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5926 {
5927 }
5928 #endif
5929
5930 void dp_mon_register_lpc_ops_1_0(struct dp_mon_ops *mon_ops)
5931 {
5932 mon_ops->mon_soc_attach[0] = NULL;
5933 mon_ops->mon_soc_detach[0] = NULL;
5934 mon_ops->mon_soc_init[0] = NULL;
5935 mon_ops->mon_soc_deinit[0] = NULL;
5936 mon_ops->mon_soc_attach[1] = dp_mon_soc_attach_2_0;
5937 mon_ops->mon_soc_detach[1] = dp_mon_soc_detach_2_0;
5938 mon_ops->mon_soc_init[1] = dp_mon_soc_init_2_0;
5939 mon_ops->mon_soc_deinit[1] = dp_mon_soc_deinit_2_0;
5940
5941 dp_mon_config_register_ops(mon_ops);
5942
5943 mon_ops->mon_rings_alloc[0] = dp_mon_rings_alloc_1_0;
5944 mon_ops->mon_rings_free[0] = dp_mon_rings_free_1_0;
5945 mon_ops->mon_rings_init[0] = dp_mon_rings_init_1_0;
5946 mon_ops->mon_rings_deinit[0] = dp_mon_rings_deinit_1_0;
5947 mon_ops->mon_rings_alloc[1] = dp_pdev_mon_rings_alloc_2_0;
5948 mon_ops->mon_rings_free[1] = dp_pdev_mon_rings_free_2_0;
5949 mon_ops->mon_rings_init[1] = dp_pdev_mon_rings_init_2_0;
5950 mon_ops->mon_rings_deinit[1] = dp_pdev_mon_rings_deinit_2_0;
5951
5952 mon_ops->mon_filter_setup_tx_mon_mode =
5953 dp_mon_filter_setup_local_pkt_capture_tx;
5954 mon_ops->mon_filter_reset_tx_mon_mode =
5955 dp_mon_filter_reset_local_pkt_capture_tx;
5956 mon_ops->tx_mon_filter_update = dp_tx_mon_filter_update_2_0;
5957
5958 mon_ops->rx_hdr_length_set = dp_rx_mon_hdr_length_set;
5959 dp_mon_register_tx_pkt_enh_ops_1_0(mon_ops);
5960 }
5961 #else
5962 #if !defined(DISABLE_MON_CONFIG)
5963 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5964 {
5965 mon_ops->mon_pdev_htt_srng_setup[0] = dp_mon_htt_srng_setup_1_0;
5966 mon_ops->mon_pdev_htt_srng_setup[1] = NULL;
5967 mon_ops->mon_soc_htt_srng_setup = NULL;
5968 }
5969 #else
5970 static inline void dp_mon_config_register_ops(struct dp_mon_ops *mon_ops)
5971 {
5972 }
5973 #endif
5974
5975 void dp_mon_register_lpc_ops_1_0(struct dp_mon_ops *mon_ops)
5976 {
5977 mon_ops->mon_soc_attach[0] = NULL;
5978 mon_ops->mon_soc_detach[0] = NULL;
5979 mon_ops->mon_soc_init[0] = NULL;
5980 mon_ops->mon_soc_deinit[0] = NULL;
5981 mon_ops->mon_soc_attach[1] = NULL;
5982 mon_ops->mon_soc_detach[1] = NULL;
5983 mon_ops->mon_soc_init[1] = NULL;
5984 mon_ops->mon_soc_deinit[1] = NULL;
5985
5986 dp_mon_config_register_ops(mon_ops);
5987
5988 mon_ops->mon_rings_alloc[0] = dp_mon_rings_alloc_1_0;
5989 mon_ops->mon_rings_free[0] = dp_mon_rings_free_1_0;
5990 mon_ops->mon_rings_init[0] = dp_mon_rings_init_1_0;
5991 mon_ops->mon_rings_deinit[0] = dp_mon_rings_deinit_1_0;
5992 mon_ops->mon_rings_alloc[1] = NULL;
5993 mon_ops->mon_rings_free[1] = NULL;
5994 mon_ops->mon_rings_init[1] = NULL;
5995 mon_ops->mon_rings_deinit[1] = NULL;
5996
5997 mon_ops->mon_filter_setup_tx_mon_mode = NULL;
5998 mon_ops->mon_filter_reset_tx_mon_mode = NULL;
5999 mon_ops->tx_mon_filter_update = NULL;
6000
6001 mon_ops->rx_hdr_length_set = NULL;
6002 dp_mon_register_tx_pkt_enh_ops_1_0(mon_ops);
6003 }
6004 #endif
6005
6006 QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
6007 {
6008 struct dp_mon_pdev *mon_pdev;
6009 struct dp_mon_ops *mon_ops = NULL;
6010
6011 if (!pdev) {
6012 dp_mon_err("pdev is NULL");
6013 return QDF_STATUS_E_FAILURE;
6014 }
6015
6016 mon_pdev = pdev->monitor_pdev;
6017
6018 mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer));
6019 if (!mon_pdev->invalid_mon_peer) {
6020 dp_mon_err("%pK: Memory allocation failed for invalid "
6021 "monitor peer", pdev);
6022 return QDF_STATUS_E_NOMEM;
6023 }
6024
6025 mon_ops = dp_mon_ops_get(pdev->soc);
6026 if (!mon_ops) {
6027 dp_mon_err("Monitor ops is NULL");
6028 goto fail0;
6029 }
6030
6031 mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
6032 if (!mon_pdev->filter) {
6033 dp_mon_err("%pK: Memory allocation failed for monitor filter",
6034 pdev);
6035 goto fail0;
6036 }
6037
6038 if (mon_ops->tx_mon_filter_alloc) {
6039 if (mon_ops->tx_mon_filter_alloc(pdev)) {
6040 dp_mon_err("%pK: Memory allocation failed for tx monitor "
6041 "filter", pdev);
6042 goto fail1;
6043 }
6044 }
6045
6046 qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
6047 qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
6048 mon_pdev->monitor_configured = false;
6049 mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;
6050
6051 TAILQ_INIT(&mon_pdev->neighbour_peers_list);
6052 mon_pdev->neighbour_peers_added = false;
6053 mon_pdev->monitor_configured = false;
6054
6055 dp_mon_pdev_filter_init(mon_pdev);
6056 /*
6057 * initialize ppdu tlv list
6058 */
6059 TAILQ_INIT(&mon_pdev->ppdu_info_list);
6060 TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
6061
6062 mon_pdev->list_depth = 0;
6063 mon_pdev->tlv_count = 0;
6064 /* initialize cal client timer */
6065 dp_cal_client_attach(&mon_pdev->cal_client_ctx,
6066 dp_pdev_to_cdp_pdev(pdev),
6067 pdev->soc->osdev,
6068 &dp_iterate_update_peer_list);
6069 if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
6070 goto fail2;
6071
6072 if (mon_ops->mon_lite_mon_alloc) {
6073 if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) {
6074 dp_mon_err("%pK: lite mon alloc failed", pdev);
6075 goto fail3;
6076 }
6077 }
6078
6079 if (dp_mon_rings_init(pdev)) {
6080 dp_mon_err("%pK: MONITOR rings setup failed", pdev);
6081 goto fail4;
6082 }
6083
6084 /* initialize sw monitor rx descriptors */
6085 if (mon_ops->rx_mon_desc_pool_init)
6086 mon_ops->rx_mon_desc_pool_init(pdev);
6087
6088 /* allocate buffers and replenish the monitor RxDMA ring */
6089 if (mon_ops->rx_mon_buffers_alloc) {
6090 if (mon_ops->rx_mon_buffers_alloc(pdev)) {
6091 dp_mon_err("%pK: rx mon buffers alloc failed", pdev);
6092 goto fail5;
6093 }
6094 }
6095
6096 /* attach monitor function */
6097 dp_monitor_tx_ppdu_stats_attach(pdev);
6098
6099 /* mon pdev extended init */
6100 if (mon_ops->mon_pdev_ext_init)
6101 mon_ops->mon_pdev_ext_init(pdev);
6102
6103 if (mon_ops->mon_rx_pdev_tlv_logger_init)
6104 mon_ops->mon_rx_pdev_tlv_logger_init(pdev);
6105
6106 mon_pdev->is_dp_mon_pdev_initialized = true;
6107 dp_mon_set_local_pkt_capture_running(mon_pdev, false);
6108
6109 return QDF_STATUS_SUCCESS;
6110
6111 fail5:
6112 if (mon_ops->rx_mon_desc_pool_deinit)
6113 mon_ops->rx_mon_desc_pool_deinit(pdev);
6114
6115 dp_mon_rings_deinit(pdev);
6116 fail4:
6117 if (mon_ops->mon_lite_mon_dealloc)
6118 mon_ops->mon_lite_mon_dealloc(pdev);
6119 fail3:
6120 dp_htt_ppdu_stats_detach(pdev);
6121 fail2:
6122 qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
6123 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
6124 if (mon_ops->tx_mon_filter_dealloc)
6125 mon_ops->tx_mon_filter_dealloc(pdev);
6126 fail1:
6127 dp_mon_filter_dealloc(mon_pdev);
6128 fail0:
6129 qdf_mem_free(mon_pdev->invalid_mon_peer);
6130 return QDF_STATUS_E_FAILURE;
6131 }
6132
6133 QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
6134 {
6135 struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
6136 struct dp_mon_ops *mon_ops = NULL;
6137
6138 mon_ops = dp_mon_ops_get(pdev->soc);
6139 if (!mon_ops) {
6140 dp_mon_err("Monitor ops is NULL");
6141 return QDF_STATUS_E_FAILURE;
6142 }
6143
6144 if (!mon_pdev->is_dp_mon_pdev_initialized)
6145 return QDF_STATUS_SUCCESS;
6146
6147 dp_mon_filters_reset(pdev);
6148
6149 /* mon pdev extended deinit */
6150 if (mon_ops->mon_pdev_ext_deinit)
6151 mon_ops->mon_pdev_ext_deinit(pdev);
6152
6153 if (mon_ops->mon_rx_pdev_tlv_logger_deinit)
6154 mon_ops->mon_rx_pdev_tlv_logger_deinit(pdev);
6155
6156 /* detach monitor function */
6157 dp_monitor_tx_ppdu_stats_detach(pdev);
6158
6159 if (mon_ops->mon_lite_mon_dealloc)
6160 mon_ops->mon_lite_mon_dealloc(pdev);
6161
6162 if (mon_ops->rx_mon_buffers_free)
6163 mon_ops->rx_mon_buffers_free(pdev);
6164 if (mon_ops->rx_mon_desc_pool_deinit)
6165 mon_ops->rx_mon_desc_pool_deinit(pdev);
6166 dp_mon_rings_deinit(pdev);
6167 dp_cal_client_detach(&mon_pdev->cal_client_ctx);
6168 dp_htt_ppdu_stats_detach(pdev);
6169 qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
6170 dp_neighbour_peers_detach(pdev);
6171 dp_pktlogmod_exit(pdev);
6172 if (mon_ops->tx_mon_filter_dealloc)
6173 mon_ops->tx_mon_filter_dealloc(pdev);
6174 if (mon_pdev->filter)
6175 dp_mon_filter_dealloc(mon_pdev);
6176 if (mon_pdev->invalid_mon_peer)
6177 qdf_mem_free(mon_pdev->invalid_mon_peer);
6178 mon_pdev->is_dp_mon_pdev_initialized = false;
6179 dp_mon_set_local_pkt_capture_running(mon_pdev, false);
6180
6181 return QDF_STATUS_SUCCESS;
6182 }
6183
6184 QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
6185 {
6186 struct dp_mon_vdev *mon_vdev;
6187 struct dp_pdev *pdev = vdev->pdev;
6188
6189 mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
6190 if (!mon_vdev) {
6191 dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
6192 return QDF_STATUS_E_NOMEM;
6193 }
6194
6195 if (pdev && pdev->monitor_pdev &&
6196 pdev->monitor_pdev->scan_spcl_vap_configured)
6197 dp_scan_spcl_vap_stats_attach(mon_vdev);
6198
6199 vdev->monitor_vdev = mon_vdev;
6200
6201 return QDF_STATUS_SUCCESS;
6202 }
6203
6204 QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
6205 {
6206 struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
6207 struct dp_pdev *pdev = vdev->pdev;
6208 struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc);
6209
6210 if (!mon_ops)
6211 return QDF_STATUS_E_FAILURE;
6212
6213 if (!mon_vdev)
6214 return QDF_STATUS_E_FAILURE;
6215
6216 if (pdev->monitor_pdev->scan_spcl_vap_configured)
6217 dp_scan_spcl_vap_stats_detach(mon_vdev);
6218
6219 qdf_mem_free(mon_vdev);
6220 vdev->monitor_vdev = NULL;
6221 /* set mvdev to NULL only if detach is called for monitor/special vap
6222 */
6223 if (pdev->monitor_pdev->mvdev == vdev)
6224 pdev->monitor_pdev->mvdev = NULL;
6225
6226 if (mon_ops->mon_lite_mon_vdev_delete)
6227 mon_ops->mon_lite_mon_vdev_delete(pdev, vdev);
6228
6229 return QDF_STATUS_SUCCESS;
6230 }
6231
6232 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6233 /**
6234 * dp_mon_peer_attach_notify() - Raise WDI event for peer create
6235 * @peer: DP Peer handle
6236 *
6237 * Return: none
6238 */
6239 static inline
6240 void dp_mon_peer_attach_notify(struct dp_peer *peer)
6241 {
6242 struct dp_mon_peer *mon_peer = peer->monitor_peer;
6243 struct dp_pdev *pdev;
6244 struct dp_soc *soc;
6245 struct cdp_peer_cookie peer_cookie;
6246
6247 pdev = peer->vdev->pdev;
6248 soc = pdev->soc;
6249
6250 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6251 QDF_MAC_ADDR_SIZE);
6252
6253 peer_cookie.ctx = NULL;
6254 peer_cookie.pdev_id = pdev->pdev_id;
6255 peer_cookie.cookie = pdev->next_peer_cookie++;
6256
6257 dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc,
6258 (void *)&peer_cookie,
6259 peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
6260
6261 if (soc->peerstats_enabled) {
6262 if (!peer_cookie.ctx) {
6263 pdev->next_peer_cookie--;
6264 qdf_err("Failed to initialize peer rate stats");
6265 mon_peer->peerstats_ctx = NULL;
6266 } else {
6267 mon_peer->peerstats_ctx =
6268 (struct cdp_peer_rate_stats_ctx *)
6269 peer_cookie.ctx;
6270 }
6271 }
6272 }
6273
6274 /**
6275 * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy
6276 * @peer: DP Peer handle
6277 *
6278 * Return: none
6279 */
6280 static inline
6281 void dp_mon_peer_detach_notify(struct dp_peer *peer)
6282 {
6283 struct dp_mon_peer *mon_peer = peer->monitor_peer;
6284 struct dp_pdev *pdev;
6285 struct dp_soc *soc;
6286 struct cdp_peer_cookie peer_cookie;
6287
6288 pdev = peer->vdev->pdev;
6289 soc = pdev->soc;
6290 /* send peer destroy event to upper layer */
6291 qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6292 QDF_MAC_ADDR_SIZE);
6293 peer_cookie.ctx = NULL;
6294 peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->peerstats_ctx;
6295
6296 dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6297 soc,
6298 (void *)&peer_cookie,
6299 peer->peer_id,
6300 WDI_NO_VAL,
6301 pdev->pdev_id);
6302
6303 mon_peer->peerstats_ctx = NULL;
6304 }
6305 #else
6306 static inline
6307 void dp_mon_peer_attach_notify(struct dp_peer *peer)
6308 {
6309 peer->monitor_peer->peerstats_ctx = NULL;
6310 }
6311
6312 static inline
6313 void dp_mon_peer_detach_notify(struct dp_peer *peer)
6314 {
6315 peer->monitor_peer->peerstats_ctx = NULL;
6316 }
6317 #endif
6318
6319 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
6320 QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
6321 {
6322 struct dp_mon_peer *mon_peer;
6323 struct dp_pdev *pdev;
6324
6325 mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
6326 if (!mon_peer) {
6327 dp_mon_err("%pK: MONITOR peer allocation failed", peer);
6328 return QDF_STATUS_E_NOMEM;
6329 }
6330
6331 peer->monitor_peer = mon_peer;
6332 pdev = peer->vdev->pdev;
6333 /*
6334 * In tx_monitor mode, a filter may be set for an unassociated
6335 * peer. When that peer becomes associated, the tx_cap_enabled
6336 * flag needs to be updated to support peer filtering.
6337 */
6338 dp_monitor_peer_tx_capture_filter_check(pdev, peer);
6339
6340 DP_STATS_INIT(mon_peer);
6341 DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
6342
6343 dp_mon_peer_attach_notify(peer);
6344
6345 return QDF_STATUS_SUCCESS;
6346 }
6347 #endif
6348
6349 QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
6350 {
6351 struct dp_mon_peer *mon_peer = peer->monitor_peer;
6352
6353 if (!mon_peer)
6354 return QDF_STATUS_SUCCESS;
6355
6356 dp_mon_peer_detach_notify(peer);
6357
6358 qdf_mem_free(mon_peer);
6359 peer->monitor_peer = NULL;
6360
6361 return QDF_STATUS_SUCCESS;
6362 }
6363
6364 #ifndef DISABLE_MON_CONFIG
6365 void dp_mon_register_intr_ops(struct dp_soc *soc)
6366 {
6367 struct dp_mon_ops *mon_ops = NULL;
6368
6369 mon_ops = dp_mon_ops_get(soc);
6370 if (!mon_ops) {
6371 dp_mon_err("Monitor ops is NULL");
6372 return;
6373 }
6374 if (mon_ops->mon_register_intr_ops)
6375 mon_ops->mon_register_intr_ops(soc);
6376 }
6377 #endif
6378
6379 struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_peerstats_ctx(struct
6380 dp_peer *peer)
6381 {
6382 struct dp_mon_peer *mon_peer = peer->monitor_peer;
6383
6384 if (mon_peer)
6385 return mon_peer->peerstats_ctx;
6386 else
6387 return NULL;
6388 }
6389
6390 #ifdef QCA_ENHANCED_STATS_SUPPORT
6391 void dp_mon_peer_reset_stats(struct dp_peer *peer)
6392 {
6393 struct dp_mon_peer *mon_peer = NULL;
6394
6395 mon_peer = peer->monitor_peer;
6396 if (!mon_peer)
6397 return;
6398
6399 DP_STATS_CLR(mon_peer);
6400 DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
6401 }
6402
6403 void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
6404 enum cdp_stat_update_type type)
6405 {
6406 struct dp_mon_peer *mon_peer = peer->monitor_peer;
6407 struct dp_mon_peer_stats *mon_peer_stats;
6408
6409 if (!mon_peer || !arg)
6410 return;
6411
6412 mon_peer_stats = &mon_peer->stats;
6413
6414 switch (type) {
6415 case UPDATE_PEER_STATS:
6416 {
6417 struct cdp_peer_stats *peer_stats =
6418 (struct cdp_peer_stats *)arg;
6419 DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
6420 break;
6421 }
6422 case UPDATE_VDEV_STATS_MLD:
6423 {
6424 struct cdp_vdev_stats *vdev_stats =
6425 (struct cdp_vdev_stats *)arg;
6426 DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
6427 break;
6428 }
6429 case UPDATE_VDEV_STATS:
6430 {
6431 struct dp_vdev_stats *vdev_stats =
6432 (struct dp_vdev_stats *)arg;
6433 DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
6434 break;
6435 }
6436 default:
6437 dp_mon_err("Invalid stats_update_type: %u", type);
6438 }
6439 }
6440
6441 void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev)
6442 {
6443 struct dp_mon_peer *mon_peer;
6444 struct dp_mon_peer_stats *mon_peer_stats;
6445 struct cdp_pdev_stats *pdev_stats;
6446
6447 if (!pdev || !pdev->monitor_pdev)
6448 return;
6449
6450 mon_peer = pdev->monitor_pdev->invalid_mon_peer;
6451 if (!mon_peer)
6452 return;
6453
6454 mon_peer_stats = &mon_peer->stats;
6455 pdev_stats = &pdev->stats;
6456 DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats);
6457 }
6458
6459 QDF_STATUS
6460 dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type,
6461 cdp_peer_stats_param_t *buf)
6462 {
6463 QDF_STATUS ret = QDF_STATUS_SUCCESS;
6464 struct dp_mon_peer *mon_peer;
6465
6466 mon_peer = peer->monitor_peer;
6467 if (!mon_peer)
6468 return QDF_STATUS_E_FAILURE;
6469
6470 switch (type) {
6471 case cdp_peer_tx_rate:
6472 buf->tx_rate = mon_peer->stats.tx.tx_rate;
6473 break;
6474 case cdp_peer_tx_last_tx_rate:
6475 buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate;
6476 break;
6477 case cdp_peer_tx_ratecode:
6478 buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode;
6479 break;
6480 case cdp_peer_rx_rate:
6481 buf->rx_rate = mon_peer->stats.rx.rx_rate;
6482 break;
6483 case cdp_peer_rx_last_rx_rate:
6484 buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate;
6485 break;
6486 case cdp_peer_rx_ratecode:
6487 buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode;
6488 break;
6489 case cdp_peer_rx_avg_snr:
6490 buf->rx_avg_snr = mon_peer->stats.rx.avg_snr;
6491 break;
6492 case cdp_peer_rx_snr:
6493 buf->rx_snr = mon_peer->stats.rx.snr;
6494 break;
6495 case cdp_peer_rx_avg_rate:
6496 buf->rx_rate_avg = mon_peer->stats.rx.rnd_avg_rx_rate;
6497 break;
6498 case cdp_peer_tx_avg_rate:
6499 buf->tx_rate_avg = mon_peer->stats.tx.rnd_avg_tx_rate;
6500 break;
6501 default:
6502 dp_err("Invalid stats type: %u requested", type);
6503 ret = QDF_STATUS_E_FAILURE;
6504 }
6505
6506 return ret;
6507 }
6508 #endif
6509
6510 void dp_mon_ops_register(struct dp_soc *soc)
6511 {
6512 struct dp_mon_soc *mon_soc = soc->monitor_soc;
6513 uint32_t target_type;
6514
6515 target_type = hal_get_target_type(soc->hal_soc);
6516 switch (target_type) {
6517 case TARGET_TYPE_QCA6290:
6518 case TARGET_TYPE_QCA6390:
6519 case TARGET_TYPE_QCA6490:
6520 case TARGET_TYPE_QCA6750:
6521 case TARGET_TYPE_KIWI:
6522 case TARGET_TYPE_MANGO:
6523 case TARGET_TYPE_PEACH:
6524 case TARGET_TYPE_QCA8074:
6525 case TARGET_TYPE_QCA8074V2:
6526 case TARGET_TYPE_QCA6018:
6527 case TARGET_TYPE_QCA9574:
6528 case TARGET_TYPE_QCN9160:
6529 case TARGET_TYPE_QCN9000:
6530 case TARGET_TYPE_QCA5018:
6531 case TARGET_TYPE_QCN6122:
6532 case TARGET_TYPE_WCN6450:
6533 dp_mon_ops_register_1_0(mon_soc);
6534 dp_mon_ops_register_cmn_2_0(mon_soc);
6535 dp_mon_ops_register_tx_2_0(mon_soc);
6536 break;
6537 case TARGET_TYPE_QCN9224:
6538 case TARGET_TYPE_QCA5332:
6539 case TARGET_TYPE_QCN6432:
6540 #if defined(WLAN_PKT_CAPTURE_TX_2_0) || defined(WLAN_PKT_CAPTURE_RX_2_0)
6541 dp_mon_ops_register_2_0(mon_soc);
6542 #endif
6543 break;
6544 default:
6545 dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
6546 qdf_assert_always(0);
6547 break;
6548 }
6549 }
6550
6551 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
6552 void dp_mon_ops_free(struct dp_soc *soc)
6553 {
6554 struct cdp_ops *ops = soc->cdp_soc.ops;
6555 struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops;
6556 struct dp_mon_soc *mon_soc = soc->monitor_soc;
6557 struct dp_mon_ops *mon_ops = mon_soc->mon_ops;
6558
6559 if (cdp_mon_ops)
6560 qdf_mem_free(cdp_mon_ops);
6561
6562 if (mon_ops)
6563 qdf_mem_free(mon_ops);
6564 }
6565 #else
6566 void dp_mon_ops_free(struct dp_soc *soc)
6567 {
6568 }
6569 #endif
6570
6571 void dp_mon_cdp_ops_register(struct dp_soc *soc)
6572 {
6573 struct cdp_ops *ops = soc->cdp_soc.ops;
6574 uint32_t target_type;
6575
6576 if (!ops) {
6577 dp_mon_err("cdp_ops is NULL");
6578 return;
6579 }
6580
6581 target_type = hal_get_target_type(soc->hal_soc);
6582 switch (target_type) {
6583 case TARGET_TYPE_QCA6290:
6584 case TARGET_TYPE_QCA6390:
6585 case TARGET_TYPE_QCA6490:
6586 case TARGET_TYPE_QCA6750:
6587 case TARGET_TYPE_KIWI:
6588 case TARGET_TYPE_MANGO:
6589 case TARGET_TYPE_PEACH:
6590 case TARGET_TYPE_QCA8074:
6591 case TARGET_TYPE_QCA8074V2:
6592 case TARGET_TYPE_QCA6018:
6593 case TARGET_TYPE_QCA9574:
6594 case TARGET_TYPE_QCN9160:
6595 case TARGET_TYPE_QCN9000:
6596 case TARGET_TYPE_QCA5018:
6597 case TARGET_TYPE_QCN6122:
6598 case TARGET_TYPE_WCN6450:
6599 dp_mon_cdp_ops_register_1_0(ops);
6600 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
6601 dp_cfr_filter_register_1_0(ops);
6602 #endif
6603 if (target_type == TARGET_TYPE_QCN9000 ||
6604 target_type == TARGET_TYPE_QCN9160)
6605 ops->mon_ops->txrx_update_mon_mac_filter =
6606 dp_update_mon_mac_filter;
6607 break;
6608 case TARGET_TYPE_QCN9224:
6609 case TARGET_TYPE_QCA5332:
6610 case TARGET_TYPE_QCN6432:
6611 #if defined(WLAN_PKT_CAPTURE_TX_2_0) || defined(WLAN_PKT_CAPTURE_RX_2_0)
6612 dp_mon_cdp_ops_register_2_0(ops);
6613 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
6614 dp_cfr_filter_register_2_0(ops);
6615 #endif
6616 #endif /* WLAN_PKT_CAPTURE_TX_2_0 && WLAN_PKT_CAPTURE_RX_2_0 */
6617 break;
6618 default:
6619 dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
6620 qdf_assert_always(0);
6621 break;
6622 }
6623
6624 ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
6625 ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
6626 dp_get_mon_vdev_from_pdev_wifi3;
6627 #ifdef DP_PEER_EXTENDED_API
6628 ops->misc_ops->pkt_log_init = dp_pkt_log_init;
6629 ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
6630 ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
6631 #endif
6632 ops->ctrl_ops->enable_peer_based_pktlog =
6633 dp_enable_peer_based_pktlog;
6634 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
6635 ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
6636 dp_peer_update_pkt_capture_params;
6637 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
6638 #ifdef WDI_EVENT_ENABLE
6639 ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
6640 #endif
6641 #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
6642 ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
6643 dp_get_scan_spcl_vap_stats;
6644 #endif
6645 return;
6646 }
6647
6648 #ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
6649 static inline void
6650 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
6651 {
6652 if (ops->mon_ops) {
6653 qdf_mem_free(ops->mon_ops);
6654 ops->mon_ops = NULL;
6655 }
6656 }
6657 #else
6658 static inline void
6659 dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
6660 {
6661 ops->mon_ops = NULL;
6662 }
6663 #endif
6664
6665 void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
6666 {
6667 struct cdp_ops *ops = soc->cdp_soc.ops;
6668
6669 if (!ops) {
6670 dp_mon_err("cdp_ops is NULL");
6671 return;
6672 }
6673
6674 dp_mon_cdp_mon_ops_deregister(ops);
6675
6676 ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
6677 ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
6678 #ifdef DP_PEER_EXTENDED_API
6679 ops->misc_ops->pkt_log_init = NULL;
6680 ops->misc_ops->pkt_log_con_service = NULL;
6681 ops->misc_ops->pkt_log_exit = NULL;
6682 #endif
6683 ops->ctrl_ops->enable_peer_based_pktlog = NULL;
6684 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
6685 ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
6686 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
6687 #ifdef WDI_EVENT_ENABLE
6688 ops->ctrl_ops->txrx_get_pldev = NULL;
6689 #endif
6690 return;
6691 }
6692
6693 #if defined(WDI_EVENT_ENABLE) &&\
6694 (defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
6695 static inline
6696 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
6697 {
6698 mon_soc->mon_ops->mon_ppdu_stats_ind_handler = NULL;
6699 }
6700 #else
6701 static inline
6702 void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
6703 {
6704 }
6705 #endif
6706
6707 #ifdef QCA_RSSI_DB2DBM
6708 /**
6709 * dp_mon_compute_min_nf() - calculate the minimum NF value across the
6710 * 20 MHz subbands of an active chain.
6711 * @conv_params: cdp_rssi_dbm_conv_param_dp structure value
6712 * @min_nf: location to store the minimum NF value
6713 * @chain_idx: active chain index into the nf_hw_dbm array
6714 *
6715 * Computation: nfInDbm[][] = A_MIN(nfHwDbm[][]), with the row index
6716 *		running over active chains and the column index over the
6717 *		20 MHz subbands of each chain.
6718 * Example: chain_mask = 0x07 (3 active chains, indices 0, 1 and 2) and
6719 *	bandwidth = 40 MHz (two 20 MHz subbands, so subband indices
6720 *	0 and 1 are considered when computing min_nf).
6721 *
6722 * Return: QDF_STATUS_SUCCESS if the value was set successfully,
6723 *	QDF_STATUS_E_INVAL on error
6724 */
6725 static QDF_STATUS
6726 dp_mon_compute_min_nf(struct cdp_rssi_dbm_conv_param_dp *conv_params,
6727 int8_t *min_nf, int chain_idx)
6728 {
6729 int j;
6730 *min_nf = conv_params->nf_hw_dbm[chain_idx][0];
6731
6732 switch (conv_params->curr_bw) {
6733 case CHAN_WIDTH_20:
6734 case CHAN_WIDTH_5:
6735 case CHAN_WIDTH_10:
6736 break;
6737 case CHAN_WIDTH_40:
6738 for (j = 1; j < SUB40BW; j++) {
6739 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6740 *min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6741 }
6742 break;
6743 case CHAN_WIDTH_80:
6744 for (j = 1; j < SUB80BW; j++) {
6745 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6746 *min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6747 }
6748 break;
6749 case CHAN_WIDTH_160:
6750 case CHAN_WIDTH_80P80:
6751 case CHAN_WIDTH_165:
6752 for (j = 1; j < SUB160BW; j++) {
6753 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6754 *min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6755 }
6756 break;
6757 case CHAN_WIDTH_160P160:
6758 case CHAN_WIDTH_320:
6759 for (j = 1; j < SUB320BW; j++) {
6760 if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
6761 *min_nf = conv_params->nf_hw_dbm[chain_idx][j];
6762 }
6763 break;
6764 default:
6765 dp_cdp_err("Invalid bandwidth %u", conv_params->curr_bw);
6766 return QDF_STATUS_E_INVAL;
6767 }
6768 return QDF_STATUS_SUCCESS;
6769 }
6770
6771 /**
6772 * dp_mon_pdev_params_rssi_dbm_conv() - set the RSSI-to-dBm conversion
6773 *				params into the monitor pdev.
6774 * @cdp_soc: dp soc handle.
6775 * @params: cdp_rssi_db2dbm_param_dp structure value.
6776 *
6777 * Return: QDF_STATUS_SUCCESS if the values were set successfully,
6778 *		QDF_STATUS_E_INVAL on error
6779 */
6780 QDF_STATUS
6781 dp_mon_pdev_params_rssi_dbm_conv(struct cdp_soc_t *cdp_soc,
6782 struct cdp_rssi_db2dbm_param_dp *params)
6783 {
6784 struct cdp_rssi_db2dbm_param_dp *dp_rssi_params = params;
6785 uint8_t pdev_id = params->pdev_id;
6786 struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6787 struct dp_pdev *pdev =
6788 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6789 struct dp_mon_pdev *mon_pdev;
6790 struct cdp_rssi_temp_off_param_dp temp_off_param;
6791 struct cdp_rssi_dbm_conv_param_dp conv_params;
6792 int8_t min_nf = 0;
6793 int i;
6794
6795 if (!soc->features.rssi_dbm_conv_support) {
6796 dp_cdp_err("rssi dbm conversion support is false");
6797 return QDF_STATUS_E_INVAL;
6798 }
6799 if (!pdev || !pdev->monitor_pdev) {
6800 dp_cdp_err("Invalid pdev_id %u", pdev_id);
6801 return QDF_STATUS_E_FAILURE;
6802 }
6803
6804 mon_pdev = pdev->monitor_pdev;
6805 mon_pdev->rssi_dbm_conv_support =
6806 soc->features.rssi_dbm_conv_support;
6807
6808 if (dp_rssi_params->rssi_temp_off_present) {
6809 temp_off_param = dp_rssi_params->temp_off_param;
6810 mon_pdev->rssi_offsets.rssi_temp_offset =
6811 temp_off_param.rssi_temp_offset;
6812 }
6813 if (dp_rssi_params->rssi_dbm_info_present) {
6814 conv_params = dp_rssi_params->rssi_dbm_param;
6815 for (i = 0; i < CDP_MAX_NUM_ANTENNA; i++) {
6816 if (conv_params.curr_rx_chainmask & (0x01 << i)) {
6817 if (QDF_STATUS_E_INVAL == dp_mon_compute_min_nf
6818 (&conv_params, &min_nf, i))
6819 return QDF_STATUS_E_INVAL;
6820 } else {
6821 continue;
6822 }
6823 }
6824 mon_pdev->rssi_offsets.xlna_bypass_offset =
6825 conv_params.xlna_bypass_offset;
6826 mon_pdev->rssi_offsets.xlna_bypass_threshold =
6827 conv_params.xlna_bypass_threshold;
6828 mon_pdev->rssi_offsets.xbar_config = conv_params.xbar_config;
6829 mon_pdev->rssi_offsets.min_nf_dbm = min_nf;
6830 mon_pdev->rssi_offsets.rssi_offset =
6831 mon_pdev->rssi_offsets.min_nf_dbm +
6832 mon_pdev->rssi_offsets.rssi_temp_offset;
6833 }
6834 return QDF_STATUS_SUCCESS;
6835 }
6836 #endif
6837
6838 void dp_mon_intr_ops_deregister(struct dp_soc *soc)
6839 {
6840 struct dp_mon_soc *mon_soc = soc->monitor_soc;
6841
6842 mon_soc->mon_rx_process = NULL;
6843 dp_mon_ppdu_stats_handler_deregister(mon_soc);
6844 }
6845
6846 void dp_mon_feature_ops_deregister(struct dp_soc *soc)
6847 {
6848 struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
6849
6850 if (!mon_ops) {
6851 dp_err("mon_ops is NULL");
6852 return;
6853 }
6854
6855 mon_ops->mon_config_debug_sniffer = NULL;
6856 mon_ops->mon_peer_tx_init = NULL;
6857 mon_ops->mon_peer_tx_cleanup = NULL;
6858 mon_ops->mon_htt_ppdu_stats_attach = NULL;
6859 mon_ops->mon_htt_ppdu_stats_detach = NULL;
6860 mon_ops->mon_print_pdev_rx_mon_stats = NULL;
6861 mon_ops->mon_set_bsscolor = NULL;
6862 mon_ops->mon_pdev_get_filter_ucast_data = NULL;
6863 mon_ops->mon_pdev_get_filter_mcast_data = NULL;
6864 mon_ops->mon_pdev_get_filter_non_data = NULL;
6865 mon_ops->mon_neighbour_peer_add_ast = NULL;
6866 #ifdef WLAN_TX_PKT_CAPTURE_ENH
6867 mon_ops->mon_peer_tid_peer_id_update = NULL;
6868 mon_ops->mon_tx_ppdu_stats_attach = NULL;
6869 mon_ops->mon_tx_ppdu_stats_detach = NULL;
6870 mon_ops->mon_tx_capture_debugfs_init = NULL;
6871 mon_ops->mon_tx_add_to_comp_queue = NULL;
6872 mon_ops->mon_peer_tx_capture_filter_check = NULL;
6873 mon_ops->mon_print_pdev_tx_capture_stats = NULL;
6874 mon_ops->mon_config_enh_tx_capture = NULL;
6875 #endif
6876 #ifdef WLAN_RX_PKT_CAPTURE_ENH
6877 mon_ops->mon_config_enh_rx_capture = NULL;
6878 #endif
6879 #ifdef QCA_SUPPORT_BPR
6880 mon_ops->mon_set_bpr_enable = NULL;
6881 #endif
6882 #ifdef ATH_SUPPORT_NAC
6883 mon_ops->mon_set_filter_neigh_peers = NULL;
6884 #endif
6885 #ifdef WLAN_ATF_ENABLE
6886 mon_ops->mon_set_atf_stats_enable = NULL;
6887 #endif
6888 #ifdef FEATURE_NAC_RSSI
6889 mon_ops->mon_filter_neighbour_peer = NULL;
6890 #endif
6891 #ifdef QCA_MCOPY_SUPPORT
6892 mon_ops->mon_filter_setup_mcopy_mode = NULL;
6893 mon_ops->mon_filter_reset_mcopy_mode = NULL;
6894 mon_ops->mon_mcopy_check_deliver = NULL;
6895 #endif
6896 #ifdef QCA_ENHANCED_STATS_SUPPORT
6897 mon_ops->mon_filter_setup_enhanced_stats = NULL;
6898 mon_ops->mon_tx_enable_enhanced_stats = NULL;
6899 mon_ops->mon_tx_disable_enhanced_stats = NULL;
6900 mon_ops->mon_ppdu_desc_deliver = NULL;
6901 mon_ops->mon_ppdu_desc_notify = NULL;
6902 mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
6903 #ifdef WLAN_FEATURE_11BE
6904 mon_ops->mon_tx_stats_update = NULL;
6905 #endif
6906 #endif
6907 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6908 mon_ops->mon_filter_setup_smart_monitor = NULL;
6909 #endif
6910 mon_ops->mon_filter_set_reset_mon_mac_filter = NULL;
6911 #ifdef WLAN_RX_PKT_CAPTURE_ENH
6912 mon_ops->mon_filter_setup_rx_enh_capture = NULL;
6913 #endif
6914 #ifdef WDI_EVENT_ENABLE
6915 mon_ops->mon_set_pktlog_wifi3 = NULL;
6916 mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
6917 mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
6918 mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
6919 mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
6920 mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
6921 mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
6922 #ifdef BE_PKTLOG_SUPPORT
6923 mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
6924 mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
6925 #endif
6926 #endif
6927 #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
6928 mon_ops->mon_pktlogmod_exit = NULL;
6929 #endif
6930 mon_ops->rx_hdr_length_set = NULL;
6931 mon_ops->rx_packet_length_set = NULL;
6932 mon_ops->rx_wmask_subscribe = NULL;
6933 mon_ops->rx_pkt_tlv_offset = NULL;
6934 mon_ops->rx_enable_mpdu_logging = NULL;
6935 mon_ops->rx_enable_fpmo = NULL;
6936 mon_ops->mon_neighbour_peers_detach = NULL;
6937 mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
6938 mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
6939 #ifdef QCA_ENHANCED_STATS_SUPPORT
6940 mon_ops->mon_rx_stats_update = NULL;
6941 mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
6942 mon_ops->mon_rx_populate_ppdu_info = NULL;
6943 #endif
6944 }
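
/*
 * A minimal sketch of the dispatch pattern the NULL assignments above rely
 * on: each dp_monitor_*() wrapper is expected to test the op pointer before
 * calling through it, so deregistering a feature op turns the wrapper into
 * a no-op instead of a crash. The wrapper below is illustrative only (its
 * name and signature are assumptions, not quoted from this tree):
 *
 *	static inline void dp_monitor_set_bsscolor(struct dp_pdev *pdev,
 *						   uint8_t bsscolor)
 *	{
 *		struct dp_mon_ops *ops = dp_mon_ops_get(pdev->soc);
 *
 *		if (!ops || !ops->mon_set_bsscolor)
 *			return;
 *
 *		ops->mon_set_bsscolor(pdev, bsscolor);
 *	}
 */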

QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;
	qdf_size_t soc_context_size;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (soc->arch_ops.txrx_get_mon_context_size) {
		soc_context_size =
			soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_SOC);
		mon_soc = dp_context_alloc_mem(soc, DP_MON_SOC_TYPE,
					       soc_context_size);
	} else {
		mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
	}
	if (!mon_soc) {
		dp_mon_err("%pK: mem allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	/* register monitor ops */
	soc->monitor_soc = mon_soc;
	dp_mon_ops_register(soc);
	dp_mon_register_intr_ops(soc);

	dp_mon_cdp_ops_register(soc);
	dp_monitor_soc_attach(soc);
	dp_mon_register_feature_ops(soc);
	return QDF_STATUS_SUCCESS;
}
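
/*
 * Hedged call-site sketch: dp_mon_soc_attach() is intended to run once per
 * soc during bring-up, before any pdev-level monitor attach. A caller would
 * plausibly look like this (the error label is an assumption):
 *
 *	if (dp_mon_soc_attach(soc) != QDF_STATUS_SUCCESS)
 *		goto fail_mon_attach;	// no monitor state to unwind yet
 */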

QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = soc->monitor_soc;
	dp_monitor_vdev_timer_deinit(soc);
	dp_mon_cdp_ops_deregister(soc);
	dp_monitor_soc_detach(soc);
	soc->monitor_soc = NULL;
	qdf_mem_free(mon_soc);
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_FULL_MON
static void print_ring_tracker_stats(struct dp_mon_pdev *mon_pdev,
				     uint8_t target)
{
	struct dp_ring_ppdu_id_tracker *tracker;
	uint8_t i;

	if (target)
		tracker = mon_pdev->hist_ppdu_id_mon_s;
	else
		tracker = mon_pdev->hist_ppdu_id_mon_d;

	for (i = 0; i < DP_HIST_TRACK_SIZE; i++) {
		qdf_print("idx: %d dest_ppdu_id: %d dest_time: %lld d_hp: %d ",
			  i, tracker[i].ppdu_id_mon_dest,
			  tracker[i].time_ppdu_id_mon_dest,
			  tracker[i].dest_hp);
		qdf_print("d_tp: %d d_hw_hp: %d d_hw_tp: %d status_ppdu_id: %d",
			  tracker[i].dest_tp,
			  tracker[i].dest_hw_hp,
			  tracker[i].dest_hw_tp,
			  tracker[i].ppdu_id_mon_status);
		qdf_print(" status_time: %lld s_hp: %d s_tp: %d s_hw_hp: %d ",
			  tracker[i].time_ppdu_id_mon_status,
			  tracker[i].status_hp,
			  tracker[i].status_tp,
			  tracker[i].status_hw_hp);
		qdf_print("s_hw_tp: %d\n",
			  tracker[i].status_hw_tp);
	}
}
#else
static void print_ring_tracker_stats(struct dp_mon_pdev *mon_pdev,
				     uint8_t target)
{
}
#endif
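
/*
 * Hedged sketch of how one tracker slot might be recorded at reap time.
 * The slot index and the exact recording site are hypothetical; only the
 * field names come from the prints above:
 *
 *	struct dp_ring_ppdu_id_tracker *t =
 *		&mon_pdev->hist_ppdu_id_mon_d[slot];	// slot is assumed
 *
 *	t->ppdu_id_mon_dest = ppdu_id;
 *	t->time_ppdu_id_mon_dest = qdf_get_log_timestamp();
 *	hal_get_sw_hptp(hal_soc, mon_dest_srng, &t->dest_tp, &t->dest_hp);
 */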

void
dp_check_and_dump_full_mon_info(struct dp_soc *soc, struct dp_pdev *pdev,
				int mac_id, int war)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev;
	hal_soc_handle_t hal_soc;
	uint64_t buf_addr;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	struct hal_buf_info hbi;
	hal_ring_handle_t mon_dest_srng;
	void *ring_desc;
	struct hal_rx_mon_desc_info desc_info = {0};
	struct dp_rx_desc *rx_desc;
	uint64_t ppdu_id = 0;

	if (!mon_soc) {
		dp_err("Monitor soc is NULL\n");
		return;
	}

	if (!mon_soc->full_mon_mode) {
		dp_err("Full monitor mode is disabled\n");
		return;
	}

	/*
	 * rx_mon_ring_mask is set but the reported work done is 0, so there
	 * is a high chance of backpressure. Dump the contents of the RX
	 * monitor status and destination rings and advance to the next
	 * pointers.
	 */
	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev) {
		dp_err("mon_pdev is NULL\n");
		return;
	}

	hal_soc = soc->hal_soc;

	if (!war)
		qdf_spin_lock_bh(&mon_pdev->mon_lock);

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;
	if (!mon_status_srng)
		goto unlock_monitor;

	dp_print_ring_stat_from_hal(soc, &soc->rxdma_mon_status_ring[mac_id],
				    RXDMA_MONITOR_STATUS);
	rxdma_mon_status_ring_entry =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "hold_mon_dest_ring: %d\n", mon_pdev->hold_mon_dest_ring);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "mon_pdev last_ppdu_id: %d\n", mon_pdev->last_ppdu_id);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "soc: %d\n", hal_get_target_type(hal_soc));

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "reap_status:\n");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_NO_DMA : %lld\n",
		  mon_pdev->reap_status[DP_MON_STATUS_NO_DMA]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_MATCH : %lld\n",
		  mon_pdev->reap_status[DP_MON_STATUS_MATCH]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_LAG : %lld\n",
		  mon_pdev->reap_status[DP_MON_STATUS_LAG]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_LEAD : %lld\n",
		  mon_pdev->reap_status[DP_MON_STATUS_LEAD]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_REPLENISH : %lld\n",
		  mon_pdev->reap_status[DP_MON_STATUS_REPLENISH]);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "prev_status:\n");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_NO_DMA : %lld\n",
		  mon_pdev->prev_status[DP_MON_STATUS_NO_DMA]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_MATCH : %lld\n",
		  mon_pdev->prev_status[DP_MON_STATUS_MATCH]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_LAG : %lld\n",
		  mon_pdev->prev_status[DP_MON_STATUS_LAG]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_LEAD : %lld\n",
		  mon_pdev->prev_status[DP_MON_STATUS_LEAD]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_REPLENISH : %lld\n",
		  mon_pdev->prev_status[DP_MON_STATUS_REPLENISH]);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "match_stats:\n");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_LAG : %lld\n",
		  mon_pdev->status_match[DP_MON_STATUS_LAG]);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "\t DP_MON_STATUS_LEAD : %lld\n",
		  mon_pdev->status_match[DP_MON_STATUS_LEAD]);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "mismatch: %d\n",
		  mon_pdev->rx_mon_stats.ppdu_id_mismatch);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "status_ppdu_drop: %d\n",
		  mon_pdev->rx_mon_stats.status_ppdu_drop);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "dest_ppdu_drop: %d\n",
		  mon_pdev->rx_mon_stats.dest_ppdu_drop);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "tlv_tag_status_err: %d\n",
		  mon_pdev->rx_mon_stats.tlv_tag_status_err);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "status_buf_done_war: %d\n",
		  mon_pdev->rx_mon_stats.status_buf_done_war);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "soc[%pK] pdev[%pK] mac_id[%d]\n",
		  soc, pdev, mac_id);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "MON DEST TRACKER STATS:\n");
	print_ring_tracker_stats(mon_pdev, 0);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "MON STATUS TRACKER STATS:\n");
	print_ring_tracker_stats(mon_pdev, 1);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "rxdma_mon_status_ring:\n");
	if (!rxdma_mon_status_ring_entry) {
		dp_err("rxdma_mon_status_ring_entry NULL\n");
		goto dump_mon_destination_ring;
	}

	buf_addr =
		(HAL_RX_BUFFER_ADDR_31_0_GET(rxdma_mon_status_ring_entry) |
		((uint64_t)
		(HAL_RX_BUFFER_ADDR_39_32_GET(rxdma_mon_status_ring_entry))
		<< 32));
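
	/*
	 * The status ring entry carries a 40-bit buffer DMA address split
	 * across two words; the low 32 bits and the high 8 bits are
	 * stitched back together above before the cookie lookup.
	 */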
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "Buffer address : %llx\n", buf_addr);
	if (!buf_addr)
		goto dump_mon_destination_ring;

	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)rxdma_mon_status_ring_entry,
				  &hbi);

	print_hex_dump(KERN_ERR, "\tHAL_BUF_INFO: ", DUMP_PREFIX_NONE, 32, 4,
		       &hbi, sizeof(struct hal_buf_info), false);

	rx_desc = dp_rx_cookie_2_va_mon_status(soc, hbi.sw_cookie);
	if (!rx_desc) {
		dp_err("rx_desc is NULL\n");
		goto dump_mon_destination_ring;
	}

	print_hex_dump(KERN_ERR, "\tRX_DESC: ", DUMP_PREFIX_NONE, 32, 4,
		       rx_desc, sizeof(struct dp_rx_desc), false);

dump_mon_destination_ring:

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "rxdma_mon_destination_ring:\n");
	mon_dest_srng = pdev->soc->rxdma_mon_dst_ring[mac_id].hal_srng;

	if (!mon_dest_srng) {
		dp_err("rxdma_mon_dst_ring hal_srng is NULL\n");
		goto unlock_monitor;
	}

	dp_print_ring_stat_from_hal(soc, &soc->rxdma_mon_dst_ring[mac_id],
				    RXDMA_MONITOR_DST);

	ring_desc = hal_srng_dst_peek(hal_soc, mon_dest_srng);
	if (!ring_desc)
		goto unlock_monitor;

	ppdu_id = hal_rx_hw_desc_get_ppduid_get(hal_soc, NULL, ring_desc);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "Next dest ring ppdu id: %lld\n", ppdu_id);
	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
				    ring_desc, &desc_info);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "Next desc_info ppdu_id: %d\n", desc_info.ppdu_id);

	print_hex_dump(KERN_ERR, "\tDESC_INFO: ", DUMP_PREFIX_NONE, 32, 4,
		       &desc_info, sizeof(struct hal_rx_mon_desc_info), false);

unlock_monitor:
	if (!war)
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
}
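
/*
 * Hedged usage sketch for the dump above: a ring-stall WAR path that sees
 * the monitor mask set with zero work done might call it like this (the
 * surrounding condition and variables are assumptions for illustration):
 *
 *	if (rx_mon_ring_mask && !work_done)
 *		dp_check_and_dump_full_mon_info(soc, pdev, mac_id, 1);
 *
 * Passing war = 1 skips taking mon_lock (e.g. for callers that already
 * hold it); war = 0 lets the function lock and unlock around the dump.
 */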