xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_tx_mon_2.0.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "qdf_types.h"
19 #include "hal_be_hw_headers.h"
20 #include "dp_types.h"
21 #include "hal_be_tx.h"
22 #include "hal_api.h"
23 #include "qdf_trace.h"
24 #include "hal_be_api_mon.h"
25 #include "dp_internal.h"
26 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
27 #include "dp_mon.h"
28 #include <dp_mon_2.0.h>
29 #include <dp_tx_mon_2.0.h>
30 #include <dp_be.h>
31 #include <hal_be_api_mon.h>
32 #include <dp_mon_filter_2.0.h>
33 #include "dp_ratetable.h"
34 #ifdef QCA_SUPPORT_LITE_MONITOR
35 #include "dp_lite_mon.h"
36 #endif
37 
38 #define MAX_TX_MONITOR_STUCK 50
39 
#ifdef TXMON_DEBUG
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be: pointer to dp_pdev_tx_monitor_be
 * @work_done: tx monitor work done in the current reap pass
 *
 * Tracks consecutive zero-work passes while tx monitor is enabled and
 * warns once the stuck threshold (MAX_TX_MONITOR_STUCK) is crossed.
 * The TXMON_DEBUG variant additionally dumps ppdu-info list depths and
 * status/packet buffer counters (rate limited).
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	/* count consecutive idle passes; any productive pass resets it */
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!!!!!");
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}

	dp_mon_debug_rl("tx_ppdu_info[%u :D %u] STATUS[R %llu: F %llu] PKT_BUF[R %llu: F %llu : P %llu : S %llu]",
			tx_mon_be->tx_ppdu_info_list_depth,
			tx_mon_be->defer_ppdu_info_list_depth,
			tx_mon_be->stats.status_buf_recv,
			tx_mon_be->stats.status_buf_free,
			tx_mon_be->stats.pkt_buf_recv,
			tx_mon_be->stats.pkt_buf_free,
			tx_mon_be->stats.pkt_buf_processed,
			tx_mon_be->stats.pkt_buf_to_stack);
}

#else
/*
 * dp_tx_mon_debug_status() - API to display tx monitor status
 * @tx_mon_be: pointer to dp_pdev_tx_monitor_be
 * @work_done: tx monitor work done in the current reap pass
 *
 * Non-debug variant: only the stuck-detection accounting, no counter dump.
 *
 * Return: void
 */
static inline void
dp_tx_mon_debug_status(struct dp_pdev_tx_monitor_be *tx_mon_be,
		       uint32_t work_done)
{
	/* count consecutive idle passes; any productive pass resets it */
	if (tx_mon_be->mode && !work_done)
		tx_mon_be->stats.tx_mon_stuck++;
	else if (tx_mon_be->mode && work_done)
		tx_mon_be->stats.tx_mon_stuck = 0;

	if (tx_mon_be->stats.tx_mon_stuck > MAX_TX_MONITOR_STUCK) {
		dp_mon_warn("Tx monitor block got stuck!!!!!");
		tx_mon_be->stats.tx_mon_stuck = 0;
		tx_mon_be->stats.total_tx_mon_stuck++;
	}
}
#endif
98 
/**
 * dp_tx_mon_srng_process_2_0() - reap and process TX monitor destination ring
 * @soc: DP soc handle
 * @int_ctx: interrupt context (passed through to SRNG access start/end)
 * @mac_id: lmac id whose TX monitor destination ring is serviced
 * @quota: maximum number of ring entries to process in this pass
 *
 * Runs under mon_pdev->mon_lock. Each reaped ring entry either reports
 * drop counts (empty descriptor) or carries a status buffer that is fed
 * to pktlog and the status-TLV parser; flush/truncate end reasons free
 * the buffer immediately. Reaped monitor descriptors are collected in a
 * local list and replenished back to the TX monitor buffer ring before
 * the lock is released.
 *
 * Return: number of destination ring entries processed (work done)
 */
static inline uint32_t
dp_tx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	void *tx_mon_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *mon_dst_srng;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	uint32_t work_done = 0;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
	struct dp_mon_desc_pool *tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
	struct dp_tx_mon_desc_list mon_desc_list;
	uint32_t replenish_cnt = 0;

	if (!pdev) {
		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return work_done;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_dst_srng = mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng;

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
			   soc, mon_dst_srng);
		return work_done;
	}

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return work_done;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	hal_soc = soc->hal_soc;

	qdf_assert((hal_soc && pdev));

	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	/* local list accumulating reaped descriptors for batch replenish */
	mon_desc_list.desc_list = NULL;
	mon_desc_list.tail = NULL;
	mon_desc_list.tx_mon_reap_cnt = 0;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
			   __func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
		return work_done;
	}

	/* peek (not advance) so the entry is consumed only after handling */
	while (qdf_likely((tx_mon_dst_ring_desc =
		(void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
				&& quota--)) {
		struct hal_mon_desc hal_mon_tx_desc = {0};
		struct dp_mon_desc *mon_desc = NULL;
		qdf_frag_t status_frag = NULL;
		uint32_t end_offset = 0;

		hal_be_get_mon_dest_status(soc->hal_soc,
					   tx_mon_dst_ring_desc,
					   &hal_mon_tx_desc);

		if (hal_mon_tx_desc.empty_descriptor) {
			/* no buffer attached: entry only conveys drop counts */
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d  DROP[PPDU:%d MPDU:%d TLV:%d] E_O_PPDU:%d",
				    hal_mon_tx_desc.ppdu_id,
				    hal_mon_tx_desc.initiator,
				    hal_mon_tx_desc.empty_descriptor,
				    hal_mon_tx_desc.ring_id,
				    hal_mon_tx_desc.looping_count,
				    hal_mon_tx_desc.ppdu_drop_count,
				    hal_mon_tx_desc.mpdu_drop_count,
				    hal_mon_tx_desc.tlv_drop_count,
				    hal_mon_tx_desc.end_of_ppdu_dropped);

			tx_mon_be->stats.ppdu_drop_cnt +=
				hal_mon_tx_desc.ppdu_drop_count;
			tx_mon_be->stats.mpdu_drop_cnt +=
				hal_mon_tx_desc.mpdu_drop_count;
			tx_mon_be->stats.tlv_drop_cnt +=
				hal_mon_tx_desc.tlv_drop_count;
			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
			    hal_mon_tx_desc.ppdu_id,
			    hal_mon_tx_desc.initiator,
			    hal_mon_tx_desc.empty_descriptor,
			    hal_mon_tx_desc.ring_id,
			    hal_mon_tx_desc.looping_count,
			    hal_mon_tx_desc.buf_addr,
			    hal_mon_tx_desc.end_offset,
			    hal_mon_tx_desc.end_reason);

		/* buf_addr is a sw cookie: the dp_mon_desc pointer itself */
		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_tx_desc.buf_addr);
		qdf_assert_always(mon_desc);

		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
					   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}

		if (mon_desc->magic != DP_MON_DESC_MAGIC) {
			dp_mon_err("Invalid monitor descriptor");
			qdf_assert_always(0);
		}

		end_offset = hal_mon_tx_desc.end_offset;

		/* take ownership of the status fragment from the descriptor */
		status_frag = (qdf_frag_t)(mon_desc->buf_addr);
		mon_desc->buf_addr = NULL;
		/* increment reap count */
		++mon_desc_list.tx_mon_reap_cnt;

		/* add the mon_desc to free list */
		dp_mon_add_to_free_desc_list(&mon_desc_list.desc_list,
					     &mon_desc_list.tail, mon_desc);


		if (qdf_unlikely(!status_frag)) {
			dp_mon_debug("P_ID:%d INIT:%d E_DESC:%d R_ID:%d L_CNT:%d BUF_ADDR: 0x%llx E_OFF: %d E_REA: %d",
				     hal_mon_tx_desc.ppdu_id,
				     hal_mon_tx_desc.initiator,
				     hal_mon_tx_desc.empty_descriptor,
				     hal_mon_tx_desc.ring_id,
				     hal_mon_tx_desc.looping_count,
				     hal_mon_tx_desc.buf_addr,
				     hal_mon_tx_desc.end_offset,
				     hal_mon_tx_desc.end_reason);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		tx_mon_be->stats.status_buf_recv++;

		/* flush/truncate: drop embedded packet buffers, free status */
		if ((hal_mon_tx_desc.end_reason == HAL_MON_FLUSH_DETECTED) ||
		    (hal_mon_tx_desc.end_reason == HAL_MON_PPDU_TRUNCATED)) {
			tx_mon_be->be_ppdu_id = hal_mon_tx_desc.ppdu_id;

			dp_tx_mon_update_end_reason(mon_pdev,
						    hal_mon_tx_desc.ppdu_id,
						    hal_mon_tx_desc.end_reason);
			/* check and free packet buffer from status buffer */
			dp_tx_mon_status_free_packet_buf(pdev, status_frag,
							 end_offset,
							 &mon_desc_list);

			tx_mon_be->stats.status_buf_free++;
			qdf_frag_free(status_frag);

			work_done++;
			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
			continue;
		}

		dp_tx_process_pktlog_be(soc, pdev,
					status_frag,
					end_offset);

		dp_tx_mon_process_status_tlv(soc, pdev,
					     &hal_mon_tx_desc,
					     status_frag,
					     end_offset,
					     &mon_desc_list);

		work_done++;
		hal_srng_dst_get_next(hal_soc, mon_dst_srng);
	}
	dp_srng_access_end(int_ctx, soc, mon_dst_srng);

	/* give reaped descriptors back to the TX monitor buffer ring */
	if (mon_desc_list.tx_mon_reap_cnt) {
		dp_mon_buffers_replenish(soc, &mon_soc_be->tx_mon_buf_ring,
					 tx_mon_desc_pool,
					 mon_desc_list.tx_mon_reap_cnt,
					 &mon_desc_list.desc_list,
					 &mon_desc_list.tail,
					 &replenish_cnt);
	}
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
	dp_mon_debug("mac_id: %d, work_done:%d tx_monitor_reap_cnt:%d",
		     mac_id, work_done, mon_desc_list.tx_mon_reap_cnt);

	tx_mon_be->stats.total_tx_mon_reap_cnt += mon_desc_list.tx_mon_reap_cnt;
	tx_mon_be->stats.totat_tx_mon_replenish_cnt += replenish_cnt;
	dp_tx_mon_debug_status(tx_mon_be, work_done);

	return work_done;
}
296 
297 uint32_t
dp_tx_mon_process_2_0(struct dp_soc * soc,struct dp_intr * int_ctx,uint32_t mac_id,uint32_t quota)298 dp_tx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
299 		      uint32_t mac_id, uint32_t quota)
300 {
301 	uint32_t work_done;
302 
303 	work_done = dp_tx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
304 
305 	return work_done;
306 }
307 
308 void
dp_tx_mon_print_ring_stat_2_0(struct dp_pdev * pdev)309 dp_tx_mon_print_ring_stat_2_0(struct dp_pdev *pdev)
310 {
311 	struct dp_soc *soc = pdev->soc;
312 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
313 	struct dp_mon_soc_be *mon_soc_be =
314 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
315 	int lmac_id;
316 
317 	lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, pdev->pdev_id);
318 	dp_print_ring_stat_from_hal(soc, &mon_soc_be->tx_mon_buf_ring,
319 				    TX_MONITOR_BUF);
320 	dp_print_ring_stat_from_hal(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
321 				    TX_MONITOR_DST);
322 }
323 
324 void
dp_tx_mon_buf_desc_pool_deinit(struct dp_soc * soc)325 dp_tx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
326 {
327 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
328 	struct dp_mon_soc_be *mon_soc_be =
329 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
330 
331 	dp_mon_desc_pool_deinit(&mon_soc_be->tx_desc_mon);
332 }
333 
334 QDF_STATUS
dp_tx_mon_buf_desc_pool_init(struct dp_soc * soc)335 dp_tx_mon_buf_desc_pool_init(struct dp_soc *soc)
336 {
337 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
338 	struct dp_mon_soc_be *mon_soc_be =
339 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
340 	uint32_t num_entries;
341 
342 	num_entries =
343 		wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc->wlan_cfg_ctx);
344 
345 	return dp_mon_desc_pool_init(&mon_soc_be->tx_desc_mon, num_entries);
346 }
347 
dp_tx_mon_buf_desc_pool_free(struct dp_soc * soc)348 void dp_tx_mon_buf_desc_pool_free(struct dp_soc *soc)
349 {
350 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
351 	struct dp_mon_soc_be *mon_soc_be =
352 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
353 
354 	if (mon_soc_be)
355 		dp_mon_desc_pool_free(soc, &mon_soc_be->tx_desc_mon,
356 				      DP_MON_TX_DESC_POOL_TYPE);
357 }
358 
dp_tx_mon_soc_init_2_0(struct dp_soc * soc)359 QDF_STATUS dp_tx_mon_soc_init_2_0(struct dp_soc *soc)
360 {
361 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
362 	struct dp_mon_soc_be *mon_soc_be =
363 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
364 
365 	if (dp_srng_init(soc, &mon_soc_be->tx_mon_buf_ring,
366 			 TX_MONITOR_BUF, 0, 0)) {
367 		dp_mon_err("%pK: " RNG_ERR "tx_mon_buf_ring", soc);
368 		goto fail;
369 	}
370 
371 	if (dp_tx_mon_buf_desc_pool_init(soc)) {
372 		dp_mon_err("%pK: " RNG_ERR "tx mon desc pool init", soc);
373 		goto fail;
374 	}
375 
376 	return QDF_STATUS_SUCCESS;
377 fail:
378 	return QDF_STATUS_E_FAILURE;
379 }
380 
dp_tx_mon_soc_deinit_2_0(struct dp_soc * soc,uint32_t lmac_id)381 void dp_tx_mon_soc_deinit_2_0(struct dp_soc *soc, uint32_t lmac_id)
382 {
383 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
384 	struct dp_mon_soc_be *mon_soc_be =
385 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
386 
387 	dp_tx_mon_buffers_free(soc);
388 	dp_tx_mon_buf_desc_pool_deinit(soc);
389 	dp_srng_deinit(soc, &mon_soc_be->tx_mon_buf_ring, TX_MONITOR_BUF, 0);
390 }
391 
392 QDF_STATUS
dp_tx_mon_buf_desc_pool_alloc(struct dp_soc * soc)393 dp_tx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
394 {
395 	struct dp_mon_desc_pool *tx_mon_desc_pool;
396 	int entries;
397 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
398 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
399 	struct dp_mon_soc_be *mon_soc_be =
400 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
401 
402 	soc_cfg_ctx = soc->wlan_cfg_ctx;
403 
404 	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
405 
406 
407 	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
408 
409 	qdf_print("%s:%d tx mon buf desc pool entries: %d", __func__, __LINE__, entries);
410 	return dp_mon_desc_pool_alloc(soc, DP_MON_TX_DESC_POOL_TYPE,
411 				      entries, tx_mon_desc_pool);
412 }
413 
414 void
dp_tx_mon_buffers_free(struct dp_soc * soc)415 dp_tx_mon_buffers_free(struct dp_soc *soc)
416 {
417 	struct dp_mon_desc_pool *tx_mon_desc_pool;
418 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
419 	struct dp_mon_soc_be *mon_soc_be =
420 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
421 
422 	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
423 
424 	dp_mon_pool_frag_unmap_and_free(soc, tx_mon_desc_pool);
425 }
426 
427 QDF_STATUS
dp_tx_mon_buffers_alloc(struct dp_soc * soc,uint32_t size)428 dp_tx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
429 {
430 	struct dp_srng *mon_buf_ring;
431 	struct dp_mon_desc_pool *tx_mon_desc_pool;
432 	union dp_mon_desc_list_elem_t *desc_list = NULL;
433 	union dp_mon_desc_list_elem_t *tail = NULL;
434 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
435 	struct dp_mon_soc_be *mon_soc_be =
436 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
437 
438 	mon_buf_ring = &mon_soc_be->tx_mon_buf_ring;
439 
440 	tx_mon_desc_pool = &mon_soc_be->tx_desc_mon;
441 
442 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
443 					tx_mon_desc_pool,
444 					size,
445 					&desc_list, &tail, NULL);
446 }
447 
448 #ifdef WLAN_TX_PKT_CAPTURE_ENH_BE
449 
450 /*
451  * dp_tx_mon_nbuf_get_num_frag() - get total number of fragments
452  * @buf: Network buf instance
453  *
454  * Return: number of fragments
455  */
456 static inline
dp_tx_mon_nbuf_get_num_frag(qdf_nbuf_t nbuf)457 uint32_t dp_tx_mon_nbuf_get_num_frag(qdf_nbuf_t nbuf)
458 {
459 	uint32_t num_frag = 0;
460 
461 	if (qdf_unlikely(!nbuf))
462 		return num_frag;
463 
464 	num_frag = qdf_nbuf_get_nr_frags_in_fraglist(nbuf);
465 
466 	return num_frag;
467 }
468 
469 /*
470  * dp_tx_mon_free_usr_mpduq() - API to free user mpduq
471  * @tx_ppdu_info - pointer to tx_ppdu_info
472  * @usr_idx - user index
473  * @tx_mon_be - pointer to tx capture be
474  *
475  * Return: void
476  */
dp_tx_mon_free_usr_mpduq(struct dp_tx_ppdu_info * tx_ppdu_info,uint8_t usr_idx,struct dp_pdev_tx_monitor_be * tx_mon_be)477 void dp_tx_mon_free_usr_mpduq(struct dp_tx_ppdu_info *tx_ppdu_info,
478 			      uint8_t usr_idx,
479 			      struct dp_pdev_tx_monitor_be *tx_mon_be)
480 {
481 	qdf_nbuf_queue_t *mpdu_q;
482 	uint32_t num_frag = 0;
483 	qdf_nbuf_t buf = NULL;
484 
485 	if (qdf_unlikely(!tx_ppdu_info))
486 		return;
487 
488 	mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
489 
490 	while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) {
491 		num_frag += dp_tx_mon_nbuf_get_num_frag(buf);
492 		qdf_nbuf_free(buf);
493 	}
494 	tx_mon_be->stats.pkt_buf_free += num_frag;
495 }
496 
497 /*
498  * dp_tx_mon_free_ppdu_info() - API to free dp_tx_ppdu_info
499  * @tx_ppdu_info - pointer to tx_ppdu_info
500  * @tx_mon_be - pointer to tx capture be
501  *
502  * Return: void
503  */
dp_tx_mon_free_ppdu_info(struct dp_tx_ppdu_info * tx_ppdu_info,struct dp_pdev_tx_monitor_be * tx_mon_be)504 void dp_tx_mon_free_ppdu_info(struct dp_tx_ppdu_info *tx_ppdu_info,
505 			      struct dp_pdev_tx_monitor_be *tx_mon_be)
506 {
507 	uint32_t user = 0;
508 
509 	for (; user < TXMON_PPDU_HAL(tx_ppdu_info, num_users); user++) {
510 		qdf_nbuf_queue_t *mpdu_q;
511 		uint32_t num_frag = 0;
512 		qdf_nbuf_t buf = NULL;
513 
514 		mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user, mpdu_q);
515 
516 		while ((buf = qdf_nbuf_queue_remove(mpdu_q)) != NULL) {
517 			num_frag += dp_tx_mon_nbuf_get_num_frag(buf);
518 			qdf_nbuf_free(buf);
519 		}
520 		tx_mon_be->stats.pkt_buf_free += num_frag;
521 	}
522 
523 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0;
524 	qdf_mem_free(tx_ppdu_info);
525 }
526 
527 /*
528  * dp_tx_mon_get_ppdu_info() - API to allocate dp_tx_ppdu_info
529  * @pdev - pdev handle
530  * @type - type of ppdu_info data or protection
531  * @num_user - number user in a ppdu_info
532  * @ppdu_id - ppdu_id number
533  *
534  * Return: pointer to dp_tx_ppdu_info
535  */
dp_tx_mon_get_ppdu_info(struct dp_pdev * pdev,enum tx_ppdu_info_type type,uint8_t num_user,uint32_t ppdu_id)536 struct dp_tx_ppdu_info *dp_tx_mon_get_ppdu_info(struct dp_pdev *pdev,
537 						enum tx_ppdu_info_type type,
538 						uint8_t num_user,
539 						uint32_t ppdu_id)
540 {
541 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
542 	struct dp_mon_pdev_be *mon_pdev_be =
543 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
544 	struct dp_pdev_tx_monitor_be *tx_mon_be =
545 			&mon_pdev_be->tx_monitor_be;
546 	struct dp_tx_ppdu_info *tx_ppdu_info;
547 	size_t sz_ppdu_info = 0;
548 	uint8_t i;
549 
550 	/* allocate new tx_ppdu_info */
551 	sz_ppdu_info = (sizeof(struct dp_tx_ppdu_info) +
552 			(sizeof(struct mon_rx_user_status) * num_user));
553 
554 	tx_ppdu_info = (struct dp_tx_ppdu_info *)qdf_mem_malloc(sz_ppdu_info);
555 	if (!tx_ppdu_info) {
556 		dp_mon_err("allocation of tx_ppdu_info type[%d] failed!!!",
557 			   type);
558 		return NULL;
559 	}
560 
561 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 0;
562 	TXMON_PPDU_HAL(tx_ppdu_info, num_users) = num_user;
563 	TXMON_PPDU_HAL(tx_ppdu_info, ppdu_id) = ppdu_id;
564 	TXMON_PPDU(tx_ppdu_info, ppdu_id) = ppdu_id;
565 
566 	for (i = 0; i < num_user; i++) {
567 		qdf_nbuf_queue_t *mpdu_q;
568 
569 		mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, i, mpdu_q);
570 		qdf_nbuf_queue_init(mpdu_q);
571 	}
572 
573 	/* assign tx_ppdu_info to monitor pdev for reference */
574 	if (type == TX_PROT_PPDU_INFO) {
575 		qdf_mem_zero(&tx_mon_be->prot_status_info, sizeof(struct hal_tx_status_info));
576 		tx_mon_be->tx_prot_ppdu_info = tx_ppdu_info;
577 		TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 0;
578 	} else {
579 		qdf_mem_zero(&tx_mon_be->data_status_info, sizeof(struct hal_tx_status_info));
580 		tx_mon_be->tx_data_ppdu_info = tx_ppdu_info;
581 		TXMON_PPDU_HAL(tx_ppdu_info, is_data) = 1;
582 	}
583 
584 	return tx_ppdu_info;
585 }
586 
/*
 * dp_print_pdev_tx_monitor_stats_2_0() - print tx capture stats
 * @pdev: DP PDEV handle
 *
 * Takes a local snapshot of the pdev tx monitor drop stats and prints
 * it; the snapshot keeps the output consistent while counters keep
 * advancing.
 *
 * Return: void
 */
void dp_print_pdev_tx_monitor_stats_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	struct dp_tx_monitor_drop_stats stats = {0};

	/* snapshot the live counters before printing */
	qdf_mem_copy(&stats, &tx_mon_be->stats,
		     sizeof(struct dp_tx_monitor_drop_stats));

	/* TX monitor stats needed for beryllium */
	DP_PRINT_STATS("\n\tTX Capture BE stats mode[%d]:", tx_mon_be->mode);
	DP_PRINT_STATS("\tbuffer pending : %u", tx_mon_be->last_frag_q_idx);
	DP_PRINT_STATS("\treplenish count: %llu",
		       stats.totat_tx_mon_replenish_cnt);
	DP_PRINT_STATS("\treap count     : %llu", stats.total_tx_mon_reap_cnt);
	DP_PRINT_STATS("\tmonitor stuck  : %u", stats.total_tx_mon_stuck);
	DP_PRINT_STATS("\tStatus buffer");
	DP_PRINT_STATS("\t\treceived  : %llu", stats.status_buf_recv);
	DP_PRINT_STATS("\t\tfree      : %llu", stats.status_buf_free);
	DP_PRINT_STATS("\tPacket buffer");
	DP_PRINT_STATS("\t\treceived  : %llu", stats.pkt_buf_recv);
	DP_PRINT_STATS("\t\tfree      : %llu", stats.pkt_buf_free);
	DP_PRINT_STATS("\t\tprocessed : %llu", stats.pkt_buf_processed);
	DP_PRINT_STATS("\t\tto stack  : %llu", stats.pkt_buf_to_stack);
	DP_PRINT_STATS("\tppdu info");
	DP_PRINT_STATS("\t\tthreshold : %llu", stats.ppdu_info_drop_th);
	DP_PRINT_STATS("\t\tflush     : %llu", stats.ppdu_info_drop_flush);
	DP_PRINT_STATS("\t\ttruncated : %llu", stats.ppdu_info_drop_trunc);
	DP_PRINT_STATS("\tDrop stats");
	DP_PRINT_STATS("\t\tppdu drop : %llu", stats.ppdu_drop_cnt);
	DP_PRINT_STATS("\t\tmpdu drop : %llu", stats.mpdu_drop_cnt);
	DP_PRINT_STATS("\t\ttlv drop : %llu", stats.tlv_drop_cnt);
}
629 
#ifdef QCA_SUPPORT_LITE_MONITOR
/*
 * dp_lite_mon_free_tx_peers() - free peers added for tx peer filtering
 * @pdev: DP pdev handle
 *
 * Frees the lite monitor tx peer list under the lite monitor tx lock.
 *
 * Return: void
 */
static void dp_lite_mon_free_tx_peers(struct dp_pdev *pdev)
{
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(pdev->monitor_pdev);
	struct dp_lite_mon_tx_config *tx_cfg =
		mon_pdev_be->lite_mon_tx_config;

	qdf_spin_lock_bh(&tx_cfg->lite_mon_tx_lock);
	dp_lite_mon_free_peers(pdev, &tx_cfg->tx_config);
	qdf_spin_unlock_bh(&tx_cfg->lite_mon_tx_lock);
}
#else
/* lite monitor not compiled in: nothing to free */
static void dp_lite_mon_free_tx_peers(struct dp_pdev *pdev)
{
}
#endif
648 
649 /*
650  * dp_config_enh_tx_monitor_2_0()- API to enable/disable enhanced tx capture
651  * @pdev_handle: DP_PDEV handle
652  * @val: user provided value
653  *
654  * Return: QDF_STATUS
655  */
656 QDF_STATUS
dp_config_enh_tx_monitor_2_0(struct dp_pdev * pdev,uint8_t val)657 dp_config_enh_tx_monitor_2_0(struct dp_pdev *pdev, uint8_t val)
658 {
659 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
660 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
661 	struct dp_mon_pdev_be *mon_pdev_be =
662 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
663 	struct dp_pdev_tx_monitor_be *tx_mon_be =
664 			&mon_pdev_be->tx_monitor_be;
665 	struct dp_soc *soc = pdev->soc;
666 	uint16_t num_of_buffers;
667 	QDF_STATUS status;
668 
669 	soc_cfg_ctx = soc->wlan_cfg_ctx;
670 	switch (val) {
671 	case TX_MON_BE_DISABLE:
672 	{
673 		tx_mon_be->mode = TX_MON_BE_DISABLE;
674 		mon_pdev_be->tx_mon_mode = 0;
675 		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B;
676 		/* Free any peers that were added for tx peer filtering */
677 		dp_lite_mon_free_tx_peers(pdev);
678 		break;
679 	}
680 	case TX_MON_BE_FULL_CAPTURE:
681 	{
682 		num_of_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
683 		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
684 								   num_of_buffers);
685 		if (status != QDF_STATUS_SUCCESS) {
686 			dp_mon_err("Tx monitor buffer allocation failed");
687 			return status;
688 		}
689 		qdf_mem_zero(&tx_mon_be->stats,
690 			     sizeof(struct dp_tx_monitor_drop_stats));
691 		tx_mon_be->last_tsft = 0;
692 		tx_mon_be->last_ppdu_timestamp = 0;
693 		tx_mon_be->mode = TX_MON_BE_FULL_CAPTURE;
694 		mon_pdev_be->tx_mon_mode = 1;
695 		mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH;
696 		break;
697 	}
698 	case TX_MON_BE_PEER_FILTER:
699 	{
700 		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
701 								   DP_MON_RING_FILL_LEVEL_DEFAULT);
702 		if (status != QDF_STATUS_SUCCESS) {
703 			dp_mon_err("Tx monitor buffer allocation failed");
704 			return status;
705 		}
706 		tx_mon_be->mode = TX_MON_BE_PEER_FILTER;
707 		mon_pdev_be->tx_mon_mode = 2;
708 		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_256B;
709 		break;
710 	}
711 	default:
712 	{
713 		return QDF_STATUS_E_INVAL;
714 	}
715 	}
716 
717 	dp_mon_info("Tx monitor mode:%d mon_mode_flag:%d config_length:%d",
718 		    tx_mon_be->mode, mon_pdev_be->tx_mon_mode,
719 		    mon_pdev_be->tx_mon_filter_length);
720 
721 	dp_mon_filter_setup_tx_mon_mode(pdev);
722 	dp_tx_mon_filter_update(pdev);
723 
724 	return QDF_STATUS_SUCCESS;
725 }
726 
/*
 * dp_peer_set_tx_capture_enabled_2_0() - add tx monitor peer filter
 * @pdev_handle: Datapath PDEV handle
 * @peer_handle: Datapath PEER handle
 * @is_tx_pkt_cap_enable: flag for tx capture enable/disable
 * @peer_mac: peer mac address
 *
 * No per-peer action is taken here; the request is accepted and the
 * arguments are ignored.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
QDF_STATUS dp_peer_set_tx_capture_enabled_2_0(struct dp_pdev *pdev_handle,
					      struct dp_peer *peer_handle,
					      uint8_t is_tx_pkt_cap_enable,
					      uint8_t *peer_mac)
{
	return QDF_STATUS_SUCCESS;
}
743 
744 #ifdef QCA_SUPPORT_LITE_MONITOR
dp_fill_lite_mon_vdev(struct cdp_tx_indication_info * tx_cap_info,struct dp_mon_pdev_be * mon_pdev_be)745 static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info,
746 				  struct dp_mon_pdev_be *mon_pdev_be)
747 {
748 	struct dp_lite_mon_config *config;
749 	struct dp_vdev *lite_mon_vdev;
750 
751 	config = &mon_pdev_be->lite_mon_tx_config->tx_config;
752 	lite_mon_vdev = config->lite_mon_vdev;
753 
754 	if (lite_mon_vdev)
755 		tx_cap_info->osif_vdev = lite_mon_vdev->osif_vdev;
756 }
757 
758 /**
759  * dp_lite_mon_filter_ppdu() - Filter frames at ppdu level
760  * @mpdu_count: mpdu count in the nbuf queue
761  * @level: Lite monitor filter level
762  *
763  * Return: QDF_STATUS
764  */
765 static inline QDF_STATUS
dp_lite_mon_filter_ppdu(uint8_t mpdu_count,uint8_t level)766 dp_lite_mon_filter_ppdu(uint8_t mpdu_count, uint8_t level)
767 {
768 	if (level == CDP_LITE_MON_LEVEL_PPDU && mpdu_count > 1)
769 		return QDF_STATUS_E_CANCELED;
770 
771 	return QDF_STATUS_SUCCESS;
772 }
773 
774 /**
775  * dp_lite_mon_filter_peer() - filter frames with peer
776  * @config: Lite monitor configuration
777  * @wh: Pointer to ieee80211_frame
778  *
779  * Return: QDF_STATUS
780  */
781 static inline QDF_STATUS
dp_lite_mon_filter_peer(struct dp_lite_mon_tx_config * config,struct ieee80211_frame_min_one * wh)782 dp_lite_mon_filter_peer(struct dp_lite_mon_tx_config *config,
783 			struct ieee80211_frame_min_one *wh)
784 {
785 	struct dp_lite_mon_peer *peer;
786 
787 	/* Return here if sw peer filtering is not required or if peer count
788 	 * is zero
789 	 */
790 	if (!config->sw_peer_filtering || !config->tx_config.peer_count)
791 		return QDF_STATUS_SUCCESS;
792 
793 	TAILQ_FOREACH(peer, &config->tx_config.peer_list, peer_list_elem) {
794 		if (!qdf_mem_cmp(&peer->peer_mac.raw[0],
795 				 &wh->i_addr1[0], QDF_MAC_ADDR_SIZE)) {
796 			return QDF_STATUS_SUCCESS;
797 		}
798 	}
799 
800 	return QDF_STATUS_E_ABORTED;
801 }
802 
803 /**
804  * dp_lite_mon_filter_subtype() - filter frames with subtype
805  * @config: Lite monitor configuration
806  * @wh: Pointer to ieee80211_frame
807  *
808  * Return: QDF_STATUS
809  */
810 static inline QDF_STATUS
dp_lite_mon_filter_subtype(struct dp_lite_mon_tx_config * config,struct ieee80211_frame_min_one * wh)811 dp_lite_mon_filter_subtype(struct dp_lite_mon_tx_config *config,
812 			   struct ieee80211_frame_min_one *wh)
813 {
814 	uint16_t mgmt_filter, ctrl_filter, data_filter, type, subtype;
815 	uint8_t is_mcast = 0;
816 
817 	/* Return here if subtype filtering is not required */
818 	if (!config->subtype_filtering)
819 		return QDF_STATUS_SUCCESS;
820 
821 	mgmt_filter = config->tx_config.mgmt_filter[DP_MON_FRM_FILTER_MODE_FP];
822 	ctrl_filter = config->tx_config.ctrl_filter[DP_MON_FRM_FILTER_MODE_FP];
823 	data_filter = config->tx_config.data_filter[DP_MON_FRM_FILTER_MODE_FP];
824 
825 	type = (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK);
826 	subtype = ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
827 		IEEE80211_FC0_SUBTYPE_SHIFT);
828 
829 	switch (type) {
830 	case QDF_IEEE80211_FC0_TYPE_MGT:
831 		if (mgmt_filter >> subtype & 0x1)
832 			return QDF_STATUS_SUCCESS;
833 		else
834 			return QDF_STATUS_E_ABORTED;
835 	case QDF_IEEE80211_FC0_TYPE_CTL:
836 		if (ctrl_filter >> subtype & 0x1)
837 			return QDF_STATUS_SUCCESS;
838 		else
839 			return QDF_STATUS_E_ABORTED;
840 	case QDF_IEEE80211_FC0_TYPE_DATA:
841 		is_mcast = DP_FRAME_IS_MULTICAST(wh->i_addr1);
842 		if ((is_mcast && (data_filter & FILTER_DATA_MCAST)) ||
843 		    (!is_mcast && (data_filter & FILTER_DATA_UCAST)))
844 			return QDF_STATUS_SUCCESS;
845 		return QDF_STATUS_E_ABORTED;
846 	default:
847 		return QDF_STATUS_E_INVAL;
848 	}
849 }
850 
851 /**
852  * dp_lite_mon_filter_peer_subtype() - filter frames with subtype and peer
853  * @config: Lite monitor configuration
854  * @buf: Pointer to nbuf
855  *
856  * Return: QDF_STATUS
857  */
858 static inline QDF_STATUS
dp_lite_mon_filter_peer_subtype(struct dp_lite_mon_tx_config * config,qdf_nbuf_t buf)859 dp_lite_mon_filter_peer_subtype(struct dp_lite_mon_tx_config *config,
860 				qdf_nbuf_t buf)
861 {
862 	struct ieee80211_frame_min_one *wh;
863 	qdf_nbuf_t nbuf;
864 	QDF_STATUS ret;
865 
866 	/* Return here if subtype and peer filtering is not required */
867 	if (!config->subtype_filtering && !config->sw_peer_filtering &&
868 	    !config->tx_config.peer_count)
869 		return QDF_STATUS_SUCCESS;
870 
871 	if (dp_tx_mon_nbuf_get_num_frag(buf)) {
872 		wh = (struct ieee80211_frame_min_one *)qdf_nbuf_get_frag_addr(buf, 0);
873 	} else {
874 		nbuf = qdf_nbuf_get_ext_list(buf);
875 		if (nbuf)
876 			wh = (struct ieee80211_frame_min_one *)qdf_nbuf_data(nbuf);
877 		else
878 			return QDF_STATUS_E_INVAL;
879 	}
880 
881 	ret = dp_lite_mon_filter_subtype(config, wh);
882 	if (ret)
883 		return ret;
884 
885 	ret = dp_lite_mon_filter_peer(config, wh);
886 	if (ret)
887 		return ret;
888 
889 	return QDF_STATUS_SUCCESS;
890 }
891 
892 /**
893  * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor
894  * @pdev: Pointer to physical device
895  * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
896  * @buf: qdf nbuf structure of buffer
897  * @mpdu_count: mpdu count in the nbuf queue
898  *
899  * Return: QDF_STATUS
900  */
901 static inline QDF_STATUS
dp_tx_lite_mon_filtering(struct dp_pdev * pdev,struct dp_tx_ppdu_info * tx_ppdu_info,qdf_nbuf_t buf,int mpdu_count)902 dp_tx_lite_mon_filtering(struct dp_pdev *pdev,
903 			 struct dp_tx_ppdu_info *tx_ppdu_info,
904 			 qdf_nbuf_t buf, int mpdu_count)
905 {
906 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
907 	struct dp_mon_pdev_be *mon_pdev_be =
908 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
909 	struct dp_lite_mon_tx_config *config =
910 		mon_pdev_be->lite_mon_tx_config;
911 	QDF_STATUS ret;
912 
913 	if (!dp_lite_mon_is_tx_enabled(mon_pdev) &&
914 	    !config->tx_config.peer_count)
915 		return QDF_STATUS_SUCCESS;
916 
917 	/* PPDU level filtering */
918 	ret = dp_lite_mon_filter_ppdu(mpdu_count, config->tx_config.level);
919 	if (ret)
920 		return ret;
921 
922 	/* Subtype and peer filtering */
923 	ret = dp_lite_mon_filter_peer_subtype(config, buf);
924 	if (ret)
925 		return ret;
926 
927 	return QDF_STATUS_SUCCESS;
928 }
929 
930 #else
/**
 * dp_fill_lite_mon_vdev() - no-op stub for builds without lite monitor
 * @tx_cap_info: tx capture info (unused)
 * @mon_pdev_be: BE monitor pdev (unused)
 *
 * NOTE(review): pairs with the lite monitor implementation selected by
 * the #ifdef preceding this #else — confirm against the full file.
 *
 * Return: void
 */
static void dp_fill_lite_mon_vdev(struct cdp_tx_indication_info *tx_cap_info,
				  struct dp_mon_pdev_be *mon_pdev_be)
{
}
935 
/**
 * dp_tx_lite_mon_filtering() - Additional filtering for lite monitor
 * @pdev: Pointer to physical device
 * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
 * @buf: qdf nbuf structure of buffer
 * @mpdu_count: mpdu count in the nbuf queue
 *
 * No-op variant used when lite monitor support is compiled out:
 * every frame passes.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_lite_mon_filtering(struct dp_pdev *pdev,
			 struct dp_tx_ppdu_info *tx_ppdu_info,
			 qdf_nbuf_t buf, int mpdu_count)
{
	return QDF_STATUS_SUCCESS;
}
952 #endif
953 
954 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
955 /**
956  * dp_tx_mon_lpc_type_filtering() - Additional filtering for lpc
957  * @pdev: Pointer to physical device
958  * @tx_ppdu_info: pointer to dp_tx_ppdu_info structure
959  * @buf: qdf nbuf structure of buffer
960  *
961  * Return: QDF_STATUS
962  */
963 static inline QDF_STATUS
dp_tx_mon_lpc_type_filtering(struct dp_pdev * pdev,struct dp_tx_ppdu_info * tx_ppdu_info,qdf_nbuf_t buf)964 dp_tx_mon_lpc_type_filtering(struct dp_pdev *pdev,
965 			     struct dp_tx_ppdu_info *tx_ppdu_info,
966 			     qdf_nbuf_t buf)
967 {
968 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
969 	qdf_nbuf_t nbuf;
970 	struct ieee80211_frame_min_one *wh;
971 	uint16_t mgmt_filter, ctrl_filter, data_filter, type;
972 
973 	if (qdf_unlikely(!IS_LOCAL_PKT_CAPTURE_RUNNING(mon_pdev,
974 			is_local_pkt_capture_running)))
975 		return QDF_STATUS_E_ABORTED;
976 
977 	if (dp_tx_mon_nbuf_get_num_frag(buf)) {
978 		wh = (struct ieee80211_frame_min_one *)qdf_nbuf_get_frag_addr(buf, 0);
979 	} else {
980 		nbuf = qdf_nbuf_get_ext_list(buf);
981 		if (nbuf)
982 			wh = (struct ieee80211_frame_min_one *)qdf_nbuf_data(nbuf);
983 		else
984 			return QDF_STATUS_E_ABORTED;
985 	}
986 
987 	mgmt_filter = mon_pdev->fp_mgmt_filter;
988 	ctrl_filter = mon_pdev->fp_ctrl_filter;
989 	data_filter = mon_pdev->fp_data_filter;
990 
991 	type = (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK);
992 
993 	switch (type) {
994 	case QDF_IEEE80211_FC0_TYPE_MGT:
995 		return mgmt_filter ? QDF_STATUS_SUCCESS : QDF_STATUS_E_ABORTED;
996 	case QDF_IEEE80211_FC0_TYPE_CTL:
997 		return ctrl_filter ? QDF_STATUS_SUCCESS : QDF_STATUS_E_ABORTED;
998 	case QDF_IEEE80211_FC0_TYPE_DATA:
999 		return data_filter ? QDF_STATUS_SUCCESS : QDF_STATUS_E_ABORTED;
1000 	default:
1001 		return QDF_STATUS_E_ABORTED;
1002 	}
1003 
1004 	return QDF_STATUS_SUCCESS;
1005 }
1006 
1007 static int
dp_tx_handle_local_pkt_capture(struct dp_pdev * pdev,qdf_nbuf_t nbuf)1008 dp_tx_handle_local_pkt_capture(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
1009 {
1010 	struct dp_mon_vdev *mon_vdev;
1011 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1012 
1013 	if (!mon_pdev->mvdev) {
1014 		dp_mon_err("Monitor vdev is NULL !!");
1015 		return 1;
1016 	}
1017 
1018 	mon_vdev = mon_pdev->mvdev->monitor_vdev;
1019 
1020 	if (mon_vdev && mon_vdev->osif_rx_mon)
1021 		mon_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev, nbuf, NULL);
1022 
1023 	return 0;
1024 }
1025 #else
/*
 * Stub: local packet capture feature disabled; report 0 (success) so the
 * caller never frees the buffer on this path.
 */
static int
dp_tx_handle_local_pkt_capture(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	return 0;
}
1031 
/*
 * Stub: WLAN_FEATURE_LOCAL_PKT_CAPTURE disabled; never filter a frame out.
 */
static inline QDF_STATUS
dp_tx_mon_lpc_type_filtering(struct dp_pdev *pdev,
			     struct dp_tx_ppdu_info *tx_ppdu_info,
			     qdf_nbuf_t buf)
{
	return QDF_STATUS_SUCCESS;
}
1039 
1040 #endif
1041 
/**
 * dp_tx_mon_send_to_stack() - API to send to stack
 * @pdev: pdev Handle
 * @mpdu: pointer to mpdu
 * @num_frag: number of frag in mpdu
 * @ppdu_id: ppdu id of the mpdu
 *
 * Ownership: @mpdu is always consumed by this function — it is either
 * handed to the local packet capture path (freed here only on error) or
 * delivered via a WDI event and freed at the end.
 *
 * Return: void
 */
static void
dp_tx_mon_send_to_stack(struct dp_pdev *pdev, qdf_nbuf_t mpdu,
			uint32_t num_frag, uint32_t ppdu_id)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	struct cdp_tx_indication_info tx_capture_info = {0};

	/* account every frag handed towards the stack */
	tx_mon_be->stats.pkt_buf_to_stack += num_frag;

	/* radiotap header was already prepended by the caller */
	tx_capture_info.radiotap_done = 1;
	tx_capture_info.mpdu_nbuf = mpdu;
	tx_capture_info.mpdu_info.ppdu_id = ppdu_id;

	if (qdf_unlikely(IS_LOCAL_PKT_CAPTURE_RUNNING(mon_pdev,
			is_local_pkt_capture_running))) {
		int ret = dp_tx_handle_local_pkt_capture(pdev, mpdu);

		/*
		 * On error, free the memory here,
		 * otherwise it will be freed by the network stack
		 */
		if (ret)
			qdf_nbuf_free(mpdu);
		return;
	} else if (!dp_lite_mon_is_tx_enabled(mon_pdev)) {
		/* full tx capture: deliver via the generic WDI event */
		dp_wdi_event_handler(WDI_EVENT_TX_PKT_CAPTURE,
				     pdev->soc,
				     &tx_capture_info,
				     HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	} else {
		/* lite monitor: fill output vdev info before delivery */
		dp_fill_lite_mon_vdev(&tx_capture_info, mon_pdev_be);
		dp_wdi_event_handler(WDI_EVENT_LITE_MON_TX,
				     pdev->soc,
				     &tx_capture_info,
				     HTT_INVALID_PEER,
				     WDI_NO_VAL,
				     pdev->pdev_id);
	}
	/*
	 * NOTE(review): nbuf is freed after the WDI event — presumably the
	 * WDI consumers do not take ownership of it; confirm with handlers.
	 */
	if (tx_capture_info.mpdu_nbuf)
		qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
}
1098 
1099 /**
1100  * dp_tx_mon_send_per_usr_mpdu() - API to send per usr mpdu to stack
1101  * @pdev: pdev Handle
1102  * @ppdu_info: pointer to dp_tx_ppdu_info
1103  * @user_idx: current user index
1104  *
1105  * Return: void
1106  */
1107 static void
dp_tx_mon_send_per_usr_mpdu(struct dp_pdev * pdev,struct dp_tx_ppdu_info * ppdu_info,uint8_t user_idx)1108 dp_tx_mon_send_per_usr_mpdu(struct dp_pdev *pdev,
1109 			    struct dp_tx_ppdu_info *ppdu_info,
1110 			    uint8_t user_idx)
1111 {
1112 	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
1113 	qdf_nbuf_t buf = NULL;
1114 	uint8_t mpdu_count = 0;
1115 
1116 	usr_mpdu_q = &TXMON_PPDU_USR(ppdu_info, user_idx, mpdu_q);
1117 
1118 	while ((buf = qdf_nbuf_queue_remove(usr_mpdu_q)) != NULL) {
1119 		uint32_t num_frag = dp_tx_mon_nbuf_get_num_frag(buf);
1120 
1121 		ppdu_info->hal_txmon.rx_status.rx_user_status =
1122 				&ppdu_info->hal_txmon.rx_user_status[user_idx];
1123 
1124 		if (dp_tx_lite_mon_filtering(pdev, ppdu_info, buf,
1125 					     ++mpdu_count) ||
1126 		    dp_tx_mon_lpc_type_filtering(pdev, ppdu_info, buf)) {
1127 			qdf_nbuf_free(buf);
1128 			continue;
1129 		}
1130 
1131 		qdf_nbuf_update_radiotap(&ppdu_info->hal_txmon.rx_status,
1132 					 buf, qdf_nbuf_headroom(buf));
1133 
1134 		dp_tx_mon_send_to_stack(pdev, buf, num_frag,
1135 					TXMON_PPDU(ppdu_info, ppdu_id));
1136 	}
1137 }
1138 
1139 #define PHY_MEDIUM_MHZ	960
1140 #define PHY_TIMESTAMP_WRAP (0xFFFFFFFF / PHY_MEDIUM_MHZ)
1141 
/**
 * dp_populate_tsft_from_phy_timestamp() - API to get tsft from phy timestamp
 * @pdev: pdev Handle
 * @ppdu_info: ppdi_info Handle
 *
 * PPDUs that carry only a PHY timestamp (e.g. response frames) get a TSF
 * extrapolated from the last PPDU that carried both tsft and
 * ppdu_timestamp. The most recent (tsft, ppdu_timestamp) pair is cached
 * in tx_mon_be to allow that extrapolation.
 *
 * Return: QDF_STATUS_SUCCESS when ppdu_info ends up with a usable
 *	   timestamp, QDF_STATUS_E_EMPTY otherwise
 */
static QDF_STATUS
dp_populate_tsft_from_phy_timestamp(struct dp_pdev *pdev,
				    struct dp_tx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct dp_pdev_tx_monitor_be *tx_mon_be =
			&mon_pdev_be->tx_monitor_be;
	uint64_t tsft = 0;
	uint32_t ppdu_timestamp = 0;

	tsft = TXMON_PPDU_COM(ppdu_info, tsft);
	ppdu_timestamp = TXMON_PPDU_COM(ppdu_info, ppdu_timestamp);

	if (tsft && ppdu_timestamp) {
		/* update tsft and ppdu timestamp */
		tx_mon_be->last_tsft = tsft;
		tx_mon_be->last_ppdu_timestamp = ppdu_timestamp;
	} else if (!tx_mon_be->last_ppdu_timestamp || !tx_mon_be->last_tsft) {
		/* no reference pair cached yet: cannot extrapolate */
		return QDF_STATUS_E_EMPTY;
	}

	if (!tsft && ppdu_timestamp) {
		/* response window */
		/*
		 * Convert PHY ticks to microseconds (PHY clock presumed
		 * 960 MHz — see PHY_MEDIUM_MHZ) and add the delta since the
		 * last reference PPDU to its TSF.
		 */
		uint32_t cur_usec = ppdu_timestamp / PHY_MEDIUM_MHZ;
		uint32_t last_usec = (tx_mon_be->last_ppdu_timestamp /
				      PHY_MEDIUM_MHZ);
		uint32_t diff = 0;

		if (last_usec < cur_usec) {
			diff = cur_usec - last_usec;
			tsft = tx_mon_be->last_tsft + diff;
		} else {
			/* 32-bit PHY timestamp wrapped between PPDUs */
			diff = (PHY_TIMESTAMP_WRAP - last_usec) + cur_usec;
			tsft = tx_mon_be->last_tsft + diff;
		}
		TXMON_PPDU_COM(ppdu_info, tsft) = tsft;
		/* update tsft and ppdu timestamp */
		tx_mon_be->last_tsft = tsft;
		tx_mon_be->last_ppdu_timestamp = ppdu_timestamp;
	}

	if (!TXMON_PPDU_COM(ppdu_info, tsft) &&
	    !TXMON_PPDU_COM(ppdu_info, ppdu_timestamp))
		return QDF_STATUS_E_EMPTY;

	return QDF_STATUS_SUCCESS;
}
1198 
1199 /**
1200  * dp_tx_mon_update_channel_freq() - API to update channel frequency and number
1201  * @pdev: pdev Handle
1202  * @soc: soc Handle
1203  * @freq: Frequency
1204  *
1205  * Return: void
1206  */
1207 static inline void
dp_tx_mon_update_channel_freq(struct dp_pdev * pdev,struct dp_soc * soc,uint16_t freq)1208 dp_tx_mon_update_channel_freq(struct dp_pdev *pdev, struct dp_soc *soc,
1209 			      uint16_t freq)
1210 {
1211 	if (soc && soc->cdp_soc.ol_ops->freq_to_channel) {
1212 		uint8_t c_num;
1213 
1214 		c_num = soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
1215 							     pdev->pdev_id,
1216 							     freq);
1217 		pdev->operating_channel.num = c_num;
1218 	}
1219 
1220 	if (soc && soc->cdp_soc.ol_ops->freq_to_band) {
1221 		uint8_t band;
1222 
1223 		band = soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
1224 							 pdev->pdev_id,
1225 							 freq);
1226 		pdev->operating_channel.band = band;
1227 	}
1228 }
1229 
/**
 * dp_tx_mon_update_radiotap() - API to update radiotap information
 * @pdev: pdev Handle
 * @ppdu_info: pointer to dp_tx_ppdu_info
 *
 * Fixes up channel frequency/number, TSF and per-user AMPDU/rate fields
 * in @ppdu_info, then delivers each user's mpdu queue to the stack.
 * Delivers nothing when no usable timestamp can be derived.
 *
 * Return: void
 */
static void
dp_tx_mon_update_radiotap(struct dp_pdev *pdev,
			  struct dp_tx_ppdu_info *ppdu_info)
{
	uint32_t usr_idx = 0;
	uint32_t num_users = 0;

	num_users = TXMON_PPDU_HAL(ppdu_info, num_users);

	/* neither freq nor chan number known: use pdev operating channel */
	if (qdf_unlikely(TXMON_PPDU_COM(ppdu_info, chan_freq) == 0 &&
			 TXMON_PPDU_COM(ppdu_info, chan_num) == 0)) {
		TXMON_PPDU_COM(ppdu_info, chan_freq) =
				pdev->operating_channel.freq;
		TXMON_PPDU_COM(ppdu_info, chan_num) =
				pdev->operating_channel.num;
	} else if (TXMON_PPDU_COM(ppdu_info, chan_freq) != 0 &&
		   TXMON_PPDU_COM(ppdu_info, chan_num) == 0) {
		/* freq known but number missing: derive it (and cache) */
		uint16_t freq = TXMON_PPDU_COM(ppdu_info, chan_freq);

		if (qdf_unlikely(pdev->operating_channel.freq != freq)) {
			dp_tx_mon_update_channel_freq(pdev, pdev->soc, freq);
			pdev->operating_channel.freq = freq;
		}
		TXMON_PPDU_COM(ppdu_info,
			       chan_num) = pdev->operating_channel.num;
	}

	/* without a usable timestamp the PPDU is not delivered at all */
	if (QDF_STATUS_SUCCESS !=
	    dp_populate_tsft_from_phy_timestamp(pdev, ppdu_info))
		return;

	/* update mlo timestamp */
	TXMON_PPDU_COM(ppdu_info, tsft) =
			(TXMON_PPDU_COM(ppdu_info, tsft) +
			 pdev->timestamp.mlo_offset_lo_us +
			 ((uint64_t)pdev->timestamp.mlo_offset_hi_us << 32));

	for (usr_idx = 0; usr_idx < num_users; usr_idx++) {
		qdf_nbuf_queue_t *mpdu_q = NULL;

		/* set AMPDU flag if number mpdu is more than 1 */
		mpdu_q = &TXMON_PPDU_USR(ppdu_info, usr_idx, mpdu_q);
		if (mpdu_q && (qdf_nbuf_queue_len(mpdu_q) > 1)) {
			TXMON_PPDU_COM(ppdu_info,
				       rs_flags) |= IEEE80211_AMPDU_FLAG;
			TXMON_PPDU_USR(ppdu_info, usr_idx, is_ampdu) = 1;
		}

		/* derive rate from sgi/mcs/nss/preamble/bw when missing */
		if (qdf_unlikely(!TXMON_PPDU_COM(ppdu_info, rate))) {
			uint32_t rate = 0;
			uint32_t rix = 0;
			uint16_t ratecode = 0;

			rate = dp_getrateindex(TXMON_PPDU_COM(ppdu_info, sgi),
					       TXMON_PPDU_USR(ppdu_info,
							      usr_idx, mcs),
					       TXMON_PPDU_COM(ppdu_info, nss),
					       TXMON_PPDU_COM(ppdu_info,
							      preamble_type),
					       TXMON_PPDU_COM(ppdu_info, bw),
					       0,
					       &rix, &ratecode);

			/* update rate */
			TXMON_PPDU_COM(ppdu_info, rate) = rate;
		}

		dp_tx_mon_send_per_usr_mpdu(pdev, ppdu_info, usr_idx);
	}
}
1307 
/**
 * dp_tx_mon_ppdu_process - Deferred PPDU stats handler
 * @context: Opaque work context (PDEV)
 *
 * Work handler: splices the pending tx_ppdu_info list onto the deferred
 * list under tx_mon_list_lock, then walks the deferred list, updating
 * radiotap and delivering each PPDU before freeing it.
 *
 * Return: none
 */
static void dp_tx_mon_ppdu_process(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_tx_ppdu_info *defer_ppdu_info = NULL;
	struct dp_tx_ppdu_info *defer_ppdu_info_next = NULL;
	struct dp_pdev_tx_monitor_be *tx_mon_be;

	/* sanity check */
	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* nothing to deliver when both tx monitor and lite monitor are off */
	if (qdf_unlikely(TX_MON_BE_DISABLE == tx_mon_be->mode &&
			 !dp_lite_mon_is_tx_enabled(mon_pdev)))
		return;

	/* take lock here */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_CONCAT(&tx_mon_be->defer_tx_ppdu_info_queue,
		      &tx_mon_be->tx_ppdu_info_queue);
	tx_mon_be->defer_ppdu_info_list_depth +=
		tx_mon_be->tx_ppdu_info_list_depth;
	tx_mon_be->tx_ppdu_info_list_depth = 0;
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	/*
	 * NOTE(review): the deferred list is walked without the lock —
	 * presumably only this work item consumes it; confirm against
	 * other users of defer_tx_ppdu_info_queue.
	 */
	STAILQ_FOREACH_SAFE(defer_ppdu_info,
			    &tx_mon_be->defer_tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, defer_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue,
			      defer_ppdu_info,
			      dp_tx_ppdu_info,
			      tx_ppdu_info_queue_elem);
		tx_mon_be->defer_ppdu_info_list_depth--;

		dp_tx_mon_update_radiotap(pdev, defer_ppdu_info);

		/* free the ppdu_info */
		dp_tx_mon_free_ppdu_info(defer_ppdu_info, tx_mon_be);
		defer_ppdu_info = NULL;
	}
}
1367 
dp_tx_ppdu_stats_attach_2_0(struct dp_pdev * pdev)1368 void dp_tx_ppdu_stats_attach_2_0(struct dp_pdev *pdev)
1369 {
1370 	struct dp_mon_pdev *mon_pdev;
1371 	struct dp_mon_pdev_be *mon_pdev_be;
1372 	struct dp_pdev_tx_monitor_be *tx_mon_be;
1373 
1374 	if (qdf_unlikely(!pdev))
1375 		return;
1376 
1377 	mon_pdev = pdev->monitor_pdev;
1378 
1379 	if (qdf_unlikely(!mon_pdev))
1380 		return;
1381 
1382 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1383 	if (qdf_unlikely(!mon_pdev_be))
1384 		return;
1385 
1386 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
1387 
1388 	STAILQ_INIT(&tx_mon_be->tx_ppdu_info_queue);
1389 	tx_mon_be->tx_ppdu_info_list_depth = 0;
1390 
1391 	STAILQ_INIT(&tx_mon_be->defer_tx_ppdu_info_queue);
1392 	tx_mon_be->defer_ppdu_info_list_depth = 0;
1393 
1394 	qdf_spinlock_create(&tx_mon_be->tx_mon_list_lock);
1395 	/* Work queue setup for TX MONITOR post handling */
1396 	qdf_create_work(0, &tx_mon_be->post_ppdu_work,
1397 			dp_tx_mon_ppdu_process, pdev);
1398 
1399 	tx_mon_be->post_ppdu_workqueue =
1400 			qdf_alloc_unbound_workqueue("tx_mon_ppdu_work_queue");
1401 }
1402 
/**
 * dp_tx_ppdu_stats_detach_2_0() - detach tx monitor ppdu stats handling
 * @pdev: DP pdev handle
 *
 * Disables tx capture, flushes and destroys the post-ppdu workqueue,
 * then drains and frees both the pending and deferred ppdu info lists
 * before destroying the list lock.
 *
 * Return: void
 */
void dp_tx_ppdu_stats_detach_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be;
	struct dp_tx_ppdu_info *tx_ppdu_info = NULL;
	struct dp_tx_ppdu_info *tx_ppdu_info_next = NULL;

	if (qdf_unlikely(!pdev))
		return;

	mon_pdev = pdev->monitor_pdev;

	if (qdf_unlikely(!mon_pdev))
		return;

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	if (qdf_unlikely(!mon_pdev_be))
		return;

	tx_mon_be = &mon_pdev_be->tx_monitor_be;
	/* TODO: disable tx_monitor, to avoid further packet from HW */
	dp_monitor_config_enh_tx_capture(pdev, TX_MON_BE_DISABLE);

	/* flush workqueue so no work item can still touch the lists */
	qdf_flush_workqueue(0, tx_mon_be->post_ppdu_workqueue);
	qdf_destroy_workqueue(0, tx_mon_be->post_ppdu_workqueue);

	/*
	 * TODO: iterate both tx_ppdu_info and defer_ppdu_info_list
	 * free the tx_ppdu_info and decrement depth
	 */
	/* drain the pending list */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->tx_ppdu_info_queue, tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->tx_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	/* drain the deferred list */
	qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
	STAILQ_FOREACH_SAFE(tx_ppdu_info,
			    &tx_mon_be->defer_tx_ppdu_info_queue,
			    tx_ppdu_info_queue_elem, tx_ppdu_info_next) {
		/* remove dp_tx_ppdu_info from the list */
		STAILQ_REMOVE(&tx_mon_be->defer_tx_ppdu_info_queue,
			      tx_ppdu_info,
			      dp_tx_ppdu_info, tx_ppdu_info_queue_elem);
		/* decrement list length */
		tx_mon_be->defer_ppdu_info_list_depth--;
		/* free tx_ppdu_info */
		dp_tx_mon_free_ppdu_info(tx_ppdu_info, tx_mon_be);
	}
	qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);

	qdf_spinlock_destroy(&tx_mon_be->tx_mon_list_lock);
}
1466 #endif /* WLAN_TX_PKT_CAPTURE_ENH_BE */
1467 
1468 #if (defined(WIFI_MONITOR_SUPPORT) && defined(WLAN_TX_MON_CORE_DEBUG))
1469 /*
1470  * dp_config_enh_tx_core_monitor_2_0()- API to validate core framework
1471  * @pdev_handle: DP_PDEV handle
1472  * @val: user provided value
1473  *
1474  * Return: QDF_STATUS
1475  */
1476 QDF_STATUS
dp_config_enh_tx_core_monitor_2_0(struct dp_pdev * pdev,uint8_t val)1477 dp_config_enh_tx_core_monitor_2_0(struct dp_pdev *pdev, uint8_t val)
1478 {
1479 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1480 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1481 	struct dp_mon_pdev_be *mon_pdev_be =
1482 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1483 	struct dp_pdev_tx_monitor_be *tx_mon_be =
1484 			&mon_pdev_be->tx_monitor_be;
1485 	struct dp_soc *soc = pdev->soc;
1486 	uint16_t num_of_buffers;
1487 	QDF_STATUS status;
1488 
1489 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1490 	switch (val) {
1491 	case TX_MON_BE_FRM_WRK_DISABLE:
1492 	{
1493 		tx_mon_be->mode = val;
1494 		mon_pdev_be->tx_mon_mode = 0;
1495 		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_64B;
1496 		break;
1497 	}
1498 	case TX_MON_BE_FRM_WRK_FULL_CAPTURE:
1499 	{
1500 		num_of_buffers = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1501 		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
1502 								   num_of_buffers);
1503 		if (status != QDF_STATUS_SUCCESS) {
1504 			dp_mon_err("Tx monitor buffer allocation failed");
1505 			return status;
1506 		}
1507 		tx_mon_be->mode = val;
1508 		qdf_mem_zero(&tx_mon_be->stats,
1509 			     sizeof(struct dp_tx_monitor_drop_stats));
1510 		tx_mon_be->mode = val;
1511 		mon_pdev_be->tx_mon_mode = 1;
1512 		mon_pdev_be->tx_mon_filter_length = DEFAULT_DMA_LENGTH;
1513 		break;
1514 	}
1515 	case TX_MON_BE_FRM_WRK_128B_CAPTURE:
1516 	{
1517 		status = dp_vdev_set_monitor_mode_buf_rings_tx_2_0(pdev,
1518 								   DP_MON_RING_FILL_LEVEL_DEFAULT);
1519 		if (status != QDF_STATUS_SUCCESS) {
1520 			dp_mon_err("Tx monitor buffer allocation failed");
1521 			return status;
1522 		}
1523 		tx_mon_be->mode = val;
1524 		mon_pdev_be->tx_mon_mode = 1;
1525 		mon_pdev_be->tx_mon_filter_length = DMA_LENGTH_128B;
1526 		break;
1527 	}
1528 	default:
1529 	{
1530 		return QDF_STATUS_E_INVAL;
1531 	}
1532 	}
1533 
1534 	dp_mon_debug("Tx monitor mode:%d mon_mode_flag:%d config_length:%d",
1535 		    tx_mon_be->mode, mon_pdev_be->tx_mon_mode,
1536 		    mon_pdev_be->tx_mon_filter_length);
1537 
1538 	/* send HTT msg to configure TLV based on mode */
1539 	dp_mon_filter_setup_tx_mon_mode(pdev);
1540 	dp_tx_mon_filter_update(pdev);
1541 
1542 	return QDF_STATUS_SUCCESS;
1543 }
1544 #endif
1545 
1546 #ifdef WLAN_PKT_CAPTURE_TX_2_0
dp_tx_mon_pdev_htt_srng_setup_2_0(struct dp_soc * soc,struct dp_pdev * pdev,int mac_id,int mac_for_pdev)1547 QDF_STATUS dp_tx_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
1548 					     struct dp_pdev *pdev,
1549 					     int mac_id,
1550 					     int mac_for_pdev)
1551 {
1552 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1553 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1554 
1555 	return htt_srng_setup(soc->htt_handle, mac_for_pdev,
1556 			      mon_soc_be->tx_mon_dst_ring[mac_id].hal_srng,
1557 			      TX_MONITOR_DST);
1558 }
1559 
dp_tx_mon_soc_htt_srng_setup_2_0(struct dp_soc * soc,int mac_id)1560 QDF_STATUS dp_tx_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc,
1561 					    int mac_id)
1562 {
1563 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1564 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1565 
1566 	hal_set_low_threshold(mon_soc_be->tx_mon_buf_ring.hal_srng, 0);
1567 	return htt_srng_setup(soc->htt_handle, mac_id,
1568 				mon_soc_be->tx_mon_buf_ring.hal_srng,
1569 				TX_MONITOR_BUF);
1570 }
1571 
dp_tx_mon_pdev_rings_alloc_2_0(struct dp_pdev * pdev,uint32_t lmac_id)1572 QDF_STATUS dp_tx_mon_pdev_rings_alloc_2_0(struct dp_pdev *pdev, uint32_t lmac_id)
1573 {
1574 	struct dp_soc *soc = pdev->soc;
1575 	int entries;
1576 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
1577 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1578 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1579 
1580 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
1581 	entries = wlan_cfg_get_dma_tx_mon_dest_ring_size(pdev_cfg_ctx);
1582 
1583 	return dp_srng_alloc(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1584 				  TX_MONITOR_DST, entries, 0);
1585 }
1586 
/**
 * dp_tx_mon_pdev_rings_free_2_0() - free the per-LMAC tx monitor dest ring
 * @pdev: DP pdev handle
 * @lmac_id: LMAC index of the ring
 *
 * Return: void
 */
void dp_tx_mon_pdev_rings_free_2_0(struct dp_pdev *pdev, uint32_t lmac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(soc->monitor_soc);

	dp_srng_free(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id]);
}
1595 
dp_tx_mon_pdev_rings_init_2_0(struct dp_pdev * pdev,uint32_t lmac_id)1596 QDF_STATUS dp_tx_mon_pdev_rings_init_2_0(struct dp_pdev *pdev, uint32_t lmac_id)
1597 {
1598 	struct dp_soc *soc = pdev->soc;
1599 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1600 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1601 
1602 	return dp_srng_init(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
1603 				 TX_MONITOR_DST, pdev->pdev_id, lmac_id);
1604 }
1605 
/**
 * dp_tx_mon_pdev_rings_deinit_2_0() - deinit the per-LMAC tx monitor dest ring
 * @pdev: DP pdev handle
 * @lmac_id: LMAC index of the ring
 *
 * Return: void
 */
void dp_tx_mon_pdev_rings_deinit_2_0(struct dp_pdev *pdev, uint32_t lmac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc_be *mon_soc_be =
		dp_get_be_mon_soc_from_dp_mon_soc(soc->monitor_soc);

	dp_srng_deinit(soc, &mon_soc_be->tx_mon_dst_ring[lmac_id],
		       TX_MONITOR_DST, pdev->pdev_id);
}
1616 
dp_tx_mon_soc_attach_2_0(struct dp_soc * soc,uint32_t lmac_id)1617 QDF_STATUS dp_tx_mon_soc_attach_2_0(struct dp_soc *soc, uint32_t lmac_id)
1618 {
1619 	int entries;
1620 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
1621 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1622 	struct dp_mon_soc_be *mon_soc_be =
1623 		dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1624 
1625 	entries = wlan_cfg_get_dp_soc_tx_mon_buf_ring_size(soc_cfg_ctx);
1626 	qdf_print("%s:%d tx mon buf entries: %d", __func__, __LINE__, entries);
1627 
1628 	return dp_srng_alloc(soc, &mon_soc_be->tx_mon_buf_ring,
1629 			  TX_MONITOR_BUF, entries, 0);
1630 }
1631 
dp_tx_mon_soc_detach_2_0(struct dp_soc * soc,uint32_t lmac_id)1632 QDF_STATUS dp_tx_mon_soc_detach_2_0(struct dp_soc *soc, uint32_t lmac_id)
1633 {
1634 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1635 	struct dp_mon_soc_be *mon_soc_be =
1636 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1637 
1638 	if (!mon_soc_be) {
1639 		dp_mon_err("DP MON SOC NULL");
1640 		return QDF_STATUS_E_FAILURE;
1641 	}
1642 
1643 	dp_tx_mon_buf_desc_pool_free(soc);
1644 	dp_srng_free(soc, &mon_soc_be->tx_mon_buf_ring);
1645 	return QDF_STATUS_SUCCESS;
1646 }
1647 
1648 #endif
1649