/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "dp_htt.h"
#include "dp_mon.h"
#include "dp_rx_mon.h"
#include "htt.h"
#include <dp_mon_1.0.h>
#include <dp_rx_mon_1.0.h>

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner);

/**
 * dp_rx_mon_handle_status_buf_done() - Handle a status buffer whose DMA
 *                                      is not yet done
 * @pdev: DP pdev handle
 * @mon_status_srng: Monitor status SRNG
 *
 * As per the MAC team's suggestion, if the HP + 2 entry's DMA done bit is
 * set, skip the HP + 1 entry and start processing it in the next interrupt.
 * If the HP + 2 entry's DMA done bit is not set, keep polling the HP + 1
 * entry until its DMA done TLV is set.
 *
 * Return: enum dp_mon_reap_status
 */
enum dp_mon_reap_status
dp_rx_mon_handle_status_buf_done(struct dp_pdev *pdev,
				 void *mon_status_srng)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *ring_entry;
	struct hal_buf_info hbi;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	void *rx_tlv;
	QDF_STATUS buf_status;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	hal_soc = soc->hal_soc;

	ring_entry = hal_srng_src_peek_n_get_next_next(hal_soc,
						       mon_status_srng);
	if (!ring_entry) {
		dp_rx_mon_status_debug("%pK: Monitor status ring entry is NULL for SRNG: %pK",
				       soc, mon_status_srng);
		return DP_MON_STATUS_NO_DMA;
	}

	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_entry,
				  &hbi);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, hbi.sw_cookie);

	qdf_assert_always(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);
	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If the status buffer DMA is not done:
	 * 1. As per the MAC team's suggestion, if the HP + 2 entry's DMA done
	 *    bit is set, replenish the HP + 1 entry and start processing in
	 *    the next interrupt.
	 * 2. If the HP + 2 entry's DMA done bit is not set, hold on to the
	 *    mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		dp_err_rl("Monitor status ring: DMA is not done "
			  "for nbuf: %pK", status_nbuf);
		mon_pdev->rx_mon_stats.tlv_tag_status_err++;
		return DP_MON_STATUS_REPLENISH;
	}

	mon_pdev->rx_mon_stats.status_buf_done_war++;

	return DP_MON_STATUS_REPLENISH;
}

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_send_ack_frame_to_stack(struct dp_soc *soc,
			   struct dp_pdev *pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
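/**
 * dp_rx_ul_ofdma_ru_size_to_width() - Convert an HTT UL OFDMA RU size
 *                                     enum to an RU width value
 * @ru_size: HTT_UL_OFDMA_V0_RU_SIZE_* value from the UL user info word
 * @ru_width: filled with the corresponding width (the table below maps
 *            e.g. RU_26 -> 1 and RU_242 -> 9, which appears to express
 *            the width as the number of 26-tone RU units spanned)
 *
 * Return: None
 */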
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		dp_rx_mon_status_err("RU size to width convert err");
		break;
	}
	*ru_width = width;
}

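/**
 * dp_rx_mon_handle_mu_ul_info() - Fill per-user UL MU information
 * @ppdu_info: per-PPDU info parsed from the status TLVs
 *
 * For UL MU-OFDMA/MU-MIMO receptions, populate MCS, NSS, RU start index,
 * RU size and RU width in each user's mon_rx_user_status from the HTT UL
 * OFDMA user info v0 words, provided the valid bit is set and the version
 * field is 0.
 *
 * Return: None
 */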
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t mu_ul_user_v0_word0;
	uint32_t mu_ul_user_v0_word1;
	uint32_t ru_width;
	uint32_t ru_size;

	if (!(ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_OFDMA ||
	      ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_MIMO))
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;
	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		mu_ul_user_v0_word0 =
			mon_rx_user_status->mu_ul_user_v0_word0;
		mu_ul_user_v0_word1 =
			mon_rx_user_status->mu_ul_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
					mu_ul_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
					mu_ul_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					mu_ul_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					mu_ul_user_v0_word1) + 1;

			mon_rx_user_status->mu_ul_info_valid = 1;
			mon_rx_user_status->ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					mu_ul_user_v0_word1);

			ru_size =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					mu_ul_user_v0_word1);
			dp_rx_ul_ofdma_ru_size_to_width(ru_size, &ru_width);
			mon_rx_user_status->ofdma_ru_width = ru_width;
			mon_rx_user_status->ofdma_ru_size = ru_size;
		}
	}
}
#else
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef QCA_UNDECODED_METADATA_SUPPORT
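/**
 * dp_rx_mon_check_phyrx_abort() - Check whether this PPDU ended in a
 *                                 PHY Rx abort while undecoded metadata
 *                                 capture is enabled
 * @pdev: DP pdev handle
 * @ppdu_info: per-PPDU info parsed from the status TLVs
 *
 * Return: true if undecoded metadata capture is enabled and the PPDU was
 *         aborted by the PHY, false otherwise
 */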
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return (pdev->monitor_pdev->undecoded_metadata_capture &&
		ppdu_info->rx_status.phyrx_abort);
}

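/**
 * dp_rx_mon_handle_ppdu_undecoded_metadata() - Deliver undecoded metadata
 *                                              for an aborted PPDU
 * @soc: core txrx main context
 * @pdev: DP pdev handle
 * @ppdu_info: per-PPDU info parsed from the status TLVs
 *
 * Hands the PPDU info to dp_rx_handle_ppdu_undecoded_metadata() when the
 * capture mode is enabled, then resets the PPDU status machine to
 * DP_PPDU_STATUS_START.
 *
 * Return: None
 */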
static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
	if (pdev->monitor_pdev->undecoded_metadata_capture)
		dp_rx_handle_ppdu_undecoded_metadata(soc, pdev, ppdu_info);

	pdev->monitor_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
}
#else
static inline bool
dp_rx_mon_check_phyrx_abort(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	return false;
}

static inline void
dp_rx_mon_handle_ppdu_undecoded_metadata(struct dp_soc *soc,
					 struct dp_pdev *pdev,
					 struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_rx_mon_update_scan_spcl_vap_stats() - Update special vap stats
 * @pdev: dp pdev context
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *rx_user_status = NULL;
	struct dp_mon_pdev *mon_pdev = NULL;
	struct dp_mon_vdev *mon_vdev = NULL;
	uint32_t num_users = 0;
	uint32_t user = 0;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->mvdev)
		return;

	mon_vdev = mon_pdev->mvdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	num_users = ppdu_info->com_info.num_users;
	for (user = 0; user < num_users; user++) {
		rx_user_status = &ppdu_info->rx_user_status[user];
		mon_vdev->scan_spcl_vap_stats->rx_ok_pkts +=
			rx_user_status->mpdu_cnt_fcs_ok;
		mon_vdev->scan_spcl_vap_stats->rx_ok_bytes +=
			rx_user_status->mpdu_ok_byte_count;
		mon_vdev->scan_spcl_vap_stats->rx_err_pkts +=
			rx_user_status->mpdu_cnt_fcs_err;
		mon_vdev->scan_spcl_vap_stats->rx_err_bytes +=
			rx_user_status->mpdu_err_byte_count;
	}
	mon_vdev->scan_spcl_vap_stats->rx_mgmt_pkts +=
		ppdu_info->frm_type_info.rx_mgmt_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_ctrl_pkts +=
		ppdu_info->frm_type_info.rx_ctrl_cnt;
	mon_vdev->scan_spcl_vap_stats->rx_data_pkts +=
		ppdu_info->frm_type_info.rx_data_cnt;
}
#else
static inline void
dp_rx_mon_update_scan_spcl_vap_stats(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
/**
 * dp_rx_mon_status_ring_record_entry() - Record one entry of a particular
 *					  event type into the monitor status
 *					  buffer tracking history.
 * @soc: DP soc handle
 * @event: event type
 * @ring_desc: Monitor status ring descriptor
 * @rx_desc: RX descriptor
 * @nbuf: status buffer.
 *
 * Return: None
 */
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
	struct dp_mon_stat_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->mon_status_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->mon_status_ring_history->index,
					DP_MON_STATUS_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->mon_status_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->event = event;
	if (event == DP_MON_STATUS_BUF_REAP) {
		hal_rx_buffer_addr_info_get_paddr(ring_desc, &hbi);

		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
					  &hbi);

		record->hbi.paddr = hbi.paddr;
		record->hbi.sw_cookie = hbi.sw_cookie;
		record->hbi.rbm = hbi.rbm;
		record->rx_desc = rx_desc;
		if (rx_desc) {
			record->nbuf = rx_desc->nbuf;
			record->rx_desc_nbuf_data = qdf_nbuf_data(rx_desc->nbuf);
		} else {
			record->nbuf = NULL;
			record->rx_desc_nbuf_data = NULL;
		}
	}

	if (event == DP_MON_STATUS_BUF_ENQUEUE) {
		record->nbuf = nbuf;
		record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
	}

	if (event == DP_MON_STATUS_BUF_DEQUEUE) {
		record->nbuf = nbuf;
		if (nbuf)
			record->rx_desc_nbuf_data = qdf_nbuf_data(nbuf);
		else
			record->rx_desc_nbuf_data = NULL;
	}
}
#else
static void
dp_rx_mon_status_ring_record_entry(struct dp_soc *soc,
				   enum dp_mon_status_process_event event,
				   hal_ring_desc_t ring_desc,
				   struct dp_rx_desc *rx_desc,
				   qdf_nbuf_t nbuf)
{
}
#endif

/**
 * dp_rx_mon_status_process_tlv() - Process the status TLVs in the status
 * buffers posted to the Rx status queue by status SRNG processing.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: amount of work that can be done
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
			     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!pdev)) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d", soc,
				       mac_id);
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	ppdu_info = &mon_pdev->ppdu_info;
	rx_mon_stats = &mon_pdev->rx_mon_stats;

	if (qdf_unlikely(mon_pdev->mon_ppdu_status != DP_PPDU_STATUS_START))
		return;

	rx_enh_capture_mode = mon_pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&mon_pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&mon_pdev->rx_status_q);
		dp_rx_mon_status_ring_record_entry(soc,
						   DP_MON_STATUS_BUF_DEQUEUE,
						   NULL, NULL, status_nbuf);

		if (qdf_unlikely(!status_nbuf))
			return;

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((mon_pdev->mvdev) || (mon_pdev->enhanced_stats_en) ||
		    (mon_pdev->mcopy_mode) || (dp_cfr_rcc_mode_status(pdev)) ||
		    (mon_pdev->undecoded_metadata_capture) ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				if (qdf_unlikely(IS_LOCAL_PKT_CAPTURE_RUNNING(mon_pdev, is_local_pkt_capture_running)))
					dp_rx_handle_local_pkt_capture(pdev, ppdu_info, status_nbuf, tlv_status);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
							      status_nbuf, ppdu_info,
							      &nbuf_used);

				dp_rx_mcopy_process_ppdu_info(pdev,
							      ppdu_info,
							      tlv_status);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv,
						mon_pdev->is_tlv_hdr_64_bit);

				if (qdf_unlikely(((rx_tlv - rx_tlv_start) >=
						  RX_MON_STATUS_BUF_SIZE) ||
						 (RX_MON_STATUS_BUF_SIZE -
						  (rx_tlv - rx_tlv_start) <
						  mon_pdev->tlv_hdr_size)))
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_START) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		dp_mon_rx_stats_update_rssi_dbm_params(mon_pdev, ppdu_info);
		if (qdf_unlikely(mon_pdev->dp_peer_based_pktlog)) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf,
							pdev->pdev_id);
		} else {
			if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL))
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE))
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (qdf_unlikely(pktlog_mode != WDI_NO_VAL))
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, pdev->pdev_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (qdf_unlikely(ppdu_info->rx_status.monitor_direct_used &&
				 mon_pdev->neighbour_peers_added &&
				 mon_pdev->mvdev)) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(IS_LOCAL_PKT_CAPTURE_RUNNING(mon_pdev,
				is_local_pkt_capture_running))) {
			qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(mon_pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (qdf_unlikely(rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				dp_rx_handle_enh_capture(soc,
							 pdev, ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (qdf_unlikely(tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE)) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
			dp_mon_rx_ppdu_status_reset(mon_pdev);
		} else if ((qdf_likely(tlv_status == HAL_TLV_STATUS_PPDU_DONE)) &&
			   (qdf_likely(!dp_rx_mon_check_phyrx_abort(pdev, ppdu_info)))) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_mu_ul_info(ppdu_info);

			if (qdf_unlikely(mon_pdev->tx_capture_enabled
					 != CDP_TX_ENH_CAPTURE_DISABLED))
				dp_send_ack_frame_to_stack(soc, pdev,
							   ppdu_info);

			if (qdf_likely(mon_pdev->enhanced_stats_en ||
				       mon_pdev->mcopy_mode ||
				       mon_pdev->neighbour_peers_added))
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
			else if (dp_cfr_rcc_mode_status(pdev))
				dp_rx_handle_cfr(soc, pdev, ppdu_info);

			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;

			/* Collect spcl vap stats if configured */
			if (qdf_unlikely(mon_pdev->scan_spcl_vap_configured))
				dp_rx_mon_update_scan_spcl_vap_stats(pdev,
								     ppdu_info);

			dp_rx_mon_update_user_ctrl_frame_stats(pdev, ppdu_info);

			/*
			 * If chan_num was not fetched correctly from the PPDU
			 * RX TLV, fall back to the value saved in the pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_num == 0))
				mon_pdev->ppdu_info.rx_status.chan_num =
							mon_pdev->mon_chan_num;
			/*
			 * If chan_freq was not fetched correctly from the PPDU
			 * RX TLV, fall back to the value saved in the pdev.
			 */
			if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.chan_freq == 0)) {
				mon_pdev->ppdu_info.rx_status.chan_freq =
							mon_pdev->mon_chan_freq;
			}

			if (!mon_soc->full_mon_mode)
				dp_rx_mon_dest_process(soc, int_ctx, mac_id,
						       quota);

			dp_mon_rx_ppdu_status_reset(mon_pdev);
		} else {
			dp_rx_mon_handle_ppdu_undecoded_metadata(soc, pdev,
								 ppdu_info);
		}
	}
	return;
}

/*
 * dp_rx_mon_status_srng_process() - Process the monitor status ring:
 * post status buffers whose status TLVs have been filled to the Rx status
 * queue for later processing, and replenish the ring entry with a newly
 * allocated buffer once the filled buffer has been posted.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	enum dp_mon_reap_status reap_status;
	uint32_t work_done = 0;
	struct dp_mon_pdev *mon_pdev;

	if (qdf_unlikely(!pdev)) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng))) {

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng))
			&& quota--)) {
		struct hal_buf_info hbi;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];
		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {

			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					(uint32_t *)rxdma_mon_status_ring_entry,
					&hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);
			dp_rx_mon_status_ring_record_entry(soc, DP_MON_STATUS_BUF_REAP,
					rxdma_mon_status_ring_entry,
					rx_desc, NULL);

			qdf_assert_always(rx_desc);

			if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc,
								buf_addr))) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				uint32_t hp, tp;
				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				dp_info_rl("tlv tag status error hp:%u, tp:%u",
					   hp, tp);

				/* RxDMA status done bit might not be set even
				 * though tp is moved by HW.
				 */

				/* If the done status is missing:
				 * 1. As per the MAC team's suggestion, when the
				 *    HP + 1 entry is peeked and its DMA is not
				 *    done but the HP + 2 entry's DMA done bit is
				 *    set, skip the HP + 1 entry and start
				 *    processing it in the next interrupt.
				 * 2. If the HP + 2 entry's DMA done bit is not
				 *    set either, poll on the HP + 1 entry until
				 *    its DMA done bit is set, i.e. check the same
				 *    buffer again on the next
				 *    dp_rx_mon_status_srng_process() pass.
				 */
				reap_status = dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (qdf_unlikely(reap_status == DP_MON_STATUS_NO_DMA))
					continue;
				else if (qdf_unlikely(reap_status == DP_MON_STATUS_REPLENISH)) {
					if (!rx_desc->unmapped) {
						qdf_nbuf_unmap_nbytes_single(
							soc->osdev, status_nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
						rx_desc->unmapped = 1;
					}
					qdf_nbuf_free(status_nbuf);
					goto buf_replenish;
				}
			}
			qdf_nbuf_set_pktlen(status_nbuf,
					    RX_MON_STATUS_BUF_SIZE);

			if (qdf_likely(!rx_desc->unmapped)) {
				qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
							     QDF_DMA_FROM_DEVICE,
							     rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&mon_pdev->rx_status_q, status_nbuf);
			dp_rx_mon_status_ring_record_entry(soc, DP_MON_STATUS_BUF_ENQUEUE,
					rxdma_mon_status_ring_entry,
					rx_desc, status_nbuf);

		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			uint32_t num_alloc_desc;

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

buf_replenish:
		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * If qdf_nbuf allocation or mapping failed, return the dp rx
		 * desc to the free list, fill a NULL DMA address into the
		 * current HP entry and keep HP in mon_status_ring unchanged,
		 * so that the next dp_rx_mon_status_srng_process() pass can
		 * fill in a buffer at the current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			dp_info_rl("fail to allocate or map qdf_nbuf");
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
						&tail, mac_id, rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				hal_soc, rxdma_mon_status_ring_entry,
				0, 0,
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;
		rx_desc->unmapped = 0;

		hal_rxdma_buff_addr_info_set(hal_soc,
					     rxdma_mon_status_ring_entry,
					     paddr, rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	dp_srng_access_end(int_ctx, soc, mon_status_srng);

	return work_done;

}

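/*
 * dp_rx_mon_status_process() - Reap the monitor status ring and then
 * consume the queued status buffers: first reap up to @quota entries
 * from the status SRNG into the per-pdev rx_status_q, then process the
 * status TLVs with whatever quota remains. Returns the number of status
 * ring entries reaped (TLV processing work is not counted).
 */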
uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			 uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, int_ctx, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, int_ctx, mac_id, quota);

	return work_done;
}

QDF_STATUS
dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u",
		 pdev_id, num_entries);

	return dp_rx_mon_status_buffers_replenish(soc, mac_id, mon_status_ring,
						  rx_desc_pool, num_entries,
						  &desc_list, &tail,
						  HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
}

QDF_STATUS
dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;

	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

	rx_desc_pool->desc_type = QDF_DP_RX_DESC_STATUS_TYPE;
	return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint32_t i;
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] init entries=%u",
		 pdev_id, num_entries);

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
	rx_desc_pool->buf_size = RX_MON_STATUS_BUF_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable frag processing flag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);

	qdf_nbuf_queue_init(&mon_pdev->rx_status_q);

	mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&mon_pdev->ppdu_info, sizeof(mon_pdev->ppdu_info));

	/*
	 * Set last_ppdu_id to HAL_INVALID_PPDU_ID in order to avoid ppdu_id
	 * match with '0' ppdu_id from monitor status ring
	 */
	mon_pdev->ppdu_info.com_info.last_ppdu_id = HAL_INVALID_PPDU_ID;

	qdf_mem_zero(&mon_pdev->rx_mon_stats, sizeof(mon_pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&mon_pdev->ppdu_info,
				      &mon_pdev->rx_mon_stats);

	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&mon_pdev->mpdu_q[i]);
		mon_pdev->is_mpdu_hdr[i] = true;
	}

	qdf_mem_zero(mon_pdev->msdu_list,
		     sizeof(mon_pdev->msdu_list[MAX_MU_USERS]));

	mon_pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
}

void
dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
}

void
dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_nbuf_free(soc, rx_desc_pool, true);
}

/*
 * dp_rx_mon_status_buffers_replenish() - Replenish the monitor status ring
 * with rx nbufs; called during dp rx monitor status ring initialization and
 * from status ring processing.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *             process, or NULL during dp rx initialization or
 *             out-of-buffer interrupt
 * @tail: tail of the descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t hp, tp;

	if (!dp_pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	dp_rx_mon_status_debug("%pK: requested %d buffers for replenish",
			       dp_soc, num_req_buffers);

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {

		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_mon_status_err("%pK: no free rx_descs in freelist",
					     dp_soc);
			return QDF_STATUS_E_NOMEM;
		}

		dp_rx_mon_status_debug("%pK: %d rx desc allocated", dp_soc,
				       num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	dp_rx_mon_status_debug("%pK: no of available entries in rxdma ring: %d",
			       dp_soc, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	while (count <= num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * If qdf_nbuf allocation or mapping failed, keep HP in
		 * mon_status_ring unchanged and let a later
		 * dp_rx_mon_status_srng_process() pass fill in the buffer
		 * at the current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			hal_get_sw_hptp(dp_soc->hal_soc, rxdma_srng, &tp, &hp);
			dp_err("%pK: qdf_nbuf allocate or map fail, count %d hp:%u tp:%u",
			       dp_soc, count, hp, tp);
			/*
			 * If buffer allocation fails on current HP, then
			 * decrement HP so it will be set to previous index
			 * where proper buffer is attached.
			 */
			hal_srng_src_dec_hp(dp_soc->hal_soc,
					    rxdma_srng);

			hal_get_sw_hptp(dp_soc->hal_soc, rxdma_srng, &tp, &hp);
			dp_err("HP adjusted to proper buffer index, hp:%u tp:%u", hp, tp);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_cur_hp_n_move_next(
						dp_soc->hal_soc,
						rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			dp_rx_mon_status_err("%pK: rxdma_ring_entry is NULL, count - %d",
					     dp_soc, count);
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.unmapped = 0;
		count++;

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
					     rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		dp_rx_mon_status_debug("%pK: rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
				       dp_soc, &(*desc_list)->rx_desc,
				       (*desc_list)->rx_desc.cookie, rx_netbuf,
				       (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_mon_status_debug("%pK: successfully replenished %d buffers",
			       dp_soc, num_req_buffers);

	dp_rx_mon_status_debug("%pK: %d rx desc added back to free list",
			       dp_soc, num_desc_to_free);

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_status_srng_drop_for_mac() - Drop the mon status ring packets for
 *				       a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 * @quota: maximum number of ring entries that can be processed
 *
 * Return: Number of ring entries reaped
 */
static uint32_t
dp_mon_status_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	void *mon_status_srng;
	hal_soc_handle_t hal_soc;
	void *ring_desc;
	uint32_t reap_cnt = 0;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		return reap_cnt;

	while ((ring_desc =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng)) &&
		reap_cnt < MON_DROP_REAP_LIMIT && quota--) {
		uint64_t buf_addr;
		struct hal_buf_info hbi;
		struct dp_rx_desc *rx_desc;
		qdf_nbuf_t status_nbuf;
		uint8_t *status_buf;
		enum dp_mon_reap_status reap_status;
		qdf_dma_addr_t iova;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];

		buf_addr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_desc) |
		    ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_desc)) << 32));

		if (qdf_likely(buf_addr)) {
			hal_rx_buf_cookie_rbm_get(soc->hal_soc,
						  (uint32_t *)ring_desc,
						  &hbi);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       hbi.sw_cookie);

			qdf_assert_always(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			if (hal_get_rx_status_done(status_buf) !=
			    QDF_STATUS_SUCCESS) {
				/* If the done status is missing:
				 * 1. As per the MAC team's suggestion, when the
				 *    HP + 1 entry is peeked and its DMA is not
				 *    done but the HP + 2 entry's DMA done bit is
				 *    set, skip the HP + 1 entry and start
				 *    processing it in the next interrupt.
				 * 2. If the HP + 2 entry's DMA done bit is not
				 *    set either, poll on the HP + 1 entry until
				 *    its DMA done bit is set, i.e. check the same
				 *    buffer again on the next
				 *    dp_rx_mon_status_srng_process() pass.
				 */
				reap_status =
					dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					break;
			}
			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(status_nbuf);
		} else {
			union dp_rx_desc_list_elem_t *rx_desc_elem;

			qdf_spin_lock_bh(&rx_desc_pool->lock);

			if (!rx_desc_pool->freelist) {
				qdf_spin_unlock_bh(&rx_desc_pool->lock);
				break;
			}
			rx_desc_elem = rx_desc_pool->freelist;
			rx_desc_pool->freelist = rx_desc_pool->freelist->next;
			qdf_spin_unlock_bh(&rx_desc_pool->lock);

			rx_desc = &rx_desc_elem->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;

			dp_info_rl("fail to allocate or map nbuf");
			dp_rx_add_to_free_desc_list(&desc_list, &tail,
						    rx_desc);
			dp_rx_add_desc_list_to_free_list(soc,
							 &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, 0, 0,
					HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));
			break;
		}

		iova = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(hal_soc, ring_desc, iova,
					     rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id));

		reap_cnt++;
		hal_srng_src_get_next(hal_soc, mon_status_srng);
	}

	hal_srng_access_end(hal_soc, mon_status_srng);

	return reap_cnt;
}

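/*
 * dp_mon_drop_packets_for_mac() - Drop monitor packets for the given mac by
 * reaping the monitor status ring (bounded by @quota) and flushing the
 * monitor destination ring. Returns the number of status ring entries
 * reaped.
 */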
uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota, bool force_flush)
{
	uint32_t work_done;

	work_done = dp_mon_status_srng_drop_for_mac(pdev, mac_id, quota);
	dp_mon_dest_srng_drop_for_mac(pdev, mac_id, force_flush);

	return work_done;
}
#else
uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota, bool force_flush)
{
	return 0;
}
#endif