xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/monitor/1.0/dp_rx_mon_dest_1.0.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_hw_headers.h"
19 #include "dp_types.h"
20 #include "dp_rx.h"
21 #include "dp_peer.h"
22 #include "hal_rx.h"
23 #include "hal_api.h"
24 #include "qdf_trace.h"
25 #include "qdf_nbuf.h"
26 #include "hal_api_mon.h"
27 #include "dp_htt.h"
28 #include "dp_mon.h"
29 #include "dp_rx_mon.h"
30 #include "wlan_cfg.h"
31 #include "dp_internal.h"
32 #include "dp_rx_buffer_pool.h"
33 #include <dp_mon_1.0.h>
34 #include <dp_rx_mon_1.0.h>
35 
36 #ifdef WLAN_TX_PKT_CAPTURE_ENH
37 #include "dp_rx_mon_feature.h"
38 #endif
39 
40 /*
41  * PPDU id ranges from 0 to 64k-1. The PPDU id read from the status ring
42  * and the PPDU id read from the destination ring shall track each other.
43  * If the distance between the two PPDU ids is less than 20000, no wrap
44  * around is assumed. Otherwise, wrap around is assumed.
45  */
46 #define NOT_PPDU_ID_WRAP_AROUND 20000
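/*
 * For example, if the status ring last reported ppdu_id 65000 and the
 * destination ring reports ppdu_id 100, the distance (64900) exceeds
 * 20000, so a wrap around is assumed. If the two ids are 1200 and 1250,
 * the distance is only 50, so no wrap around is assumed.
 */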
47 /*
48  * The destination ring processing is considered stuck if the destination
49  * ring is not moving while the status ring moves 16 PPDUs. As a workaround,
50  * the destination ring processing skips this destination ring PPDU.
51  */
52 #define MON_DEST_RING_STUCK_MAX_CNT 16
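/*
 * When dp_rx_mon_mpdu_pop() reaps zero buffers for more than this many
 * consecutive destination ring entries, dp_rx_mon_dest_process() treats the
 * destination ring as stuck, bumps the mon_rx_dest_stuck counter and
 * force-syncs the status ring PPDU id (com_info.ppdu_id) to the destination
 * ring PPDU id.
 */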
53 
54 #ifdef WLAN_TX_PKT_CAPTURE_ENH
55 void
56 dp_handle_tx_capture(struct dp_soc *soc, struct dp_pdev *pdev,
57 		     qdf_nbuf_t mon_mpdu)
58 {
59 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
60 	struct hal_rx_ppdu_info *ppdu_info = &mon_pdev->ppdu_info;
61 
62 	if (mon_pdev->tx_capture_enabled
63 	    == CDP_TX_ENH_CAPTURE_DISABLED)
64 		return;
65 
66 	if ((ppdu_info->sw_frame_group_id ==
67 	      HAL_MPDU_SW_FRAME_GROUP_CTRL_NDPA) ||
68 	     (ppdu_info->sw_frame_group_id ==
69 	      HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR))
70 		dp_handle_tx_capture_from_dest(soc, pdev, mon_mpdu);
71 }
72 
73 #ifdef QCA_MONITOR_PKT_SUPPORT
74 static void
75 dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
76 {
77 	struct dp_mon_pdev *mon_pdev = dp_pdev->monitor_pdev;
78 
79 	if (mon_pdev->tx_capture_enabled
80 	    != CDP_TX_ENH_CAPTURE_DISABLED)
81 		mon_pdev->ppdu_info.rx_info.user_id =
82 			hal_rx_hw_desc_mpdu_user_id(dp_pdev->soc->hal_soc,
83 						    rx_desc_tlv);
84 }
85 #endif
86 #else
87 static void
88 dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
89 {
90 }
91 #endif
92 
93 #ifdef QCA_MONITOR_PKT_SUPPORT
94 /**
95  * dp_rx_mon_link_desc_return() - Return an MPDU link descriptor to HW
96  *			      (WBM), following error handling
97  *
98  * @dp_pdev: core txrx pdev context
99  * @buf_addr_info: void pointer to monitor link descriptor buf addr info
100  * @mac_id: mac_id for which the link desc is released.
101  *
102  * Return: QDF_STATUS
103  */
104 QDF_STATUS
105 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
106 	hal_buff_addrinfo_t buf_addr_info, int mac_id)
107 {
108 	hal_ring_handle_t hal_ring_hdl;
109 	hal_soc_handle_t hal_soc;
110 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
111 	void *src_srng_desc;
112 
113 	hal_soc = dp_pdev->soc->hal_soc;
114 
115 	hal_ring_hdl = dp_monitor_get_link_desc_ring(dp_pdev->soc, mac_id);
116 
117 	qdf_assert(hal_ring_hdl);
118 
119 	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring_hdl))) {
120 
121 		/* TODO */
122 		/*
123 		 * Need API to convert from hal_ring pointer to
124 		 * Ring Type / Ring Id combo
125 		 */
126 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
127 			"%s %d : HAL RING Access For WBM Release"
128 			" SRNG Failed -- %pK",
129 			__func__, __LINE__, hal_ring_hdl);
130 		goto done;
131 	}
132 
133 	src_srng_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
134 
135 	if (qdf_likely(src_srng_desc)) {
136 		/* Return link descriptor through WBM ring (SW2WBM)*/
137 		hal_rx_mon_msdu_link_desc_set(hal_soc,
138 				src_srng_desc, buf_addr_info);
139 		status = QDF_STATUS_SUCCESS;
140 	} else {
141 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
142 			"%s %d -- Monitor Link Desc WBM Release Ring Full",
143 			__func__, __LINE__);
144 	}
145 done:
146 	hal_srng_access_end(hal_soc, hal_ring_hdl);
147 	return status;
148 }
149 
150 /**
151  * dp_rx_mon_mpdu_pop() - Reap the MSDUs of one MPDU from an RxDMA
152  *			    monitor destination ring entry
153  *
154  * @soc: core DP main context
155  * @mac_id: mac id which is one of 3 mac_ids
156  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
157  * @head_msdu: head of the msdu list to be popped
158  * @tail_msdu: tail of the msdu list to be popped
159  * @npackets: number of packets to be popped
160  * @ppdu_id: ppdu id of the ppdu being processed
161  * @head: head of the descs list to be freed
162  * @tail: tail of the descs list to be freed
163  *
164  * Return: number of msdus (rx buffers) popped for this MPDU
165  */
166 static inline uint32_t
167 dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
168 	hal_rxdma_desc_t rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu,
169 	qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id,
170 	union dp_rx_desc_list_elem_t **head,
171 	union dp_rx_desc_list_elem_t **tail)
172 {
173 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
174 	void *rx_desc_tlv, *first_rx_desc_tlv = NULL;
175 	void *rx_msdu_link_desc;
176 	qdf_nbuf_t msdu;
177 	qdf_nbuf_t last;
178 	struct hal_rx_msdu_list msdu_list;
179 	uint16_t num_msdus;
180 	uint32_t rx_buf_size, rx_pkt_offset;
181 	struct hal_buf_info buf_info;
182 	uint32_t rx_bufs_used = 0;
183 	uint32_t msdu_ppdu_id, msdu_cnt;
184 	uint8_t *data = NULL;
185 	uint32_t i;
186 	uint32_t total_frag_len = 0, frag_len = 0;
187 	bool is_frag, is_first_msdu;
188 	bool drop_mpdu = false, is_frag_non_raw = false;
189 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
190 	qdf_dma_addr_t buf_paddr = 0;
191 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
192 	struct cdp_mon_status *rs;
193 	struct dp_mon_pdev *mon_pdev;
194 
195 	if (qdf_unlikely(!dp_pdev)) {
196 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
197 		return rx_bufs_used;
198 	}
199 
200 	mon_pdev = dp_pdev->monitor_pdev;
201 	msdu = 0;
202 
203 	last = NULL;
204 
205 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
206 				     &buf_info, &msdu_cnt);
207 
208 	rs = &mon_pdev->rx_mon_recv_status;
209 	rs->cdp_rs_rxdma_err = false;
210 	if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) ==
211 		HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
212 		uint8_t rxdma_err =
213 			hal_rx_reo_ent_rxdma_error_code_get(
214 				rxdma_dst_ring_desc);
215 		if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
216 		   (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
217 		   (rxdma_err == HAL_RXDMA_ERR_OVERFLOW) ||
218 		   (rxdma_err == HAL_RXDMA_ERR_FCS && mon_pdev->mcopy_mode) ||
219 		   (rxdma_err == HAL_RXDMA_ERR_FCS &&
220 		    mon_pdev->rx_pktlog_cbf))) {
221 			drop_mpdu = true;
222 			mon_pdev->rx_mon_stats.dest_mpdu_drop++;
223 		}
224 		rs->cdp_rs_rxdma_err = true;
225 	}
226 
227 	is_frag = false;
228 	is_first_msdu = true;
229 
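	/*
	 * Walk the chain of MSDU link descriptors for this destination ring
	 * entry: each iteration reaps the MSDU buffers of one link descriptor
	 * into the head_msdu list and then returns the link descriptor to WBM
	 * via dp_rx_monitor_link_desc_return(). The walk ends when the next
	 * link buffer physical address is 0.
	 */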
230 	do {
231 		if (!msdu_cnt) {
232 			drop_mpdu = true;
233 			DP_STATS_INC(dp_pdev, invalid_msdu_cnt, 1);
234 		}
235 
236 		/* WAR for duplicate link descriptors received from HW */
237 		if (qdf_unlikely(mon_pdev->mon_last_linkdesc_paddr ==
238 		    buf_info.paddr)) {
239 			mon_pdev->rx_mon_stats.dup_mon_linkdesc_cnt++;
240 			return rx_bufs_used;
241 		}
242 
243 		rx_msdu_link_desc =
244 			dp_rx_cookie_2_mon_link_desc(dp_pdev,
245 						     &buf_info, mac_id);
246 
247 		qdf_assert_always(rx_msdu_link_desc);
248 
249 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
250 				     &msdu_list, &num_msdus);
251 
252 		for (i = 0; i < num_msdus; i++) {
253 			uint16_t l2_hdr_offset;
254 			struct dp_rx_desc *rx_desc = NULL;
255 			struct rx_desc_pool *rx_desc_pool;
256 
257 			rx_desc = dp_rx_get_mon_desc(soc,
258 						     msdu_list.sw_cookie[i]);
259 
260 			qdf_assert_always(rx_desc);
261 
262 			msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
263 			buf_paddr = dp_rx_mon_get_paddr_from_desc(rx_desc);
264 
265 			/* WAR for duplicate buffers received from HW */
266 			if (qdf_unlikely(mon_pdev->mon_last_buf_cookie ==
267 				msdu_list.sw_cookie[i] ||
268 				DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) ||
269 				msdu_list.paddr[i] != buf_paddr ||
270 				!rx_desc->in_use)) {
271 				/* Skip duplicate buffer and drop subsequent
272 				 * buffers in this MPDU
273 				 */
274 				drop_mpdu = true;
275 				mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
276 				mon_pdev->mon_last_linkdesc_paddr =
277 					buf_info.paddr;
278 				continue;
279 			}
280 
281 			if (rx_desc->unmapped == 0) {
282 				rx_desc_pool = dp_rx_get_mon_desc_pool(soc,
283 								       mac_id,
284 								dp_pdev->pdev_id);
285 				dp_rx_mon_buffer_unmap(soc, rx_desc,
286 						       rx_desc_pool->buf_size);
287 				rx_desc->unmapped = 1;
288 			}
289 
290 			if (dp_rx_buffer_pool_refill(soc, msdu,
291 						     rx_desc->pool_id)) {
292 				drop_mpdu = true;
293 				msdu = NULL;
294 				mon_pdev->mon_last_linkdesc_paddr =
295 					buf_info.paddr;
296 				goto next_msdu;
297 			}
298 
299 			if (drop_mpdu) {
300 				mon_pdev->mon_last_linkdesc_paddr =
301 					buf_info.paddr;
302 				dp_rx_mon_buffer_free(rx_desc);
303 				msdu = NULL;
304 				goto next_msdu;
305 			}
306 
307 			data = dp_rx_mon_get_buffer_data(rx_desc);
308 			rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data);
309 
310 			dp_rx_mon_dest_debug("%pK: i=%d, ppdu_id=%x, num_msdus = %u",
311 					     soc, i, *ppdu_id, num_msdus);
312 
313 			if (is_first_msdu) {
314 				if (!hal_rx_mpdu_start_tlv_tag_valid(
315 						soc->hal_soc,
316 						rx_desc_tlv)) {
317 					drop_mpdu = true;
318 					dp_rx_mon_buffer_free(rx_desc);
319 					msdu = NULL;
320 					mon_pdev->mon_last_linkdesc_paddr =
321 						buf_info.paddr;
322 					goto next_msdu;
323 				}
324 
325 				msdu_ppdu_id = hal_rx_hw_desc_get_ppduid_get(
326 						soc->hal_soc,
327 						rx_desc_tlv,
328 						rxdma_dst_ring_desc);
329 				is_first_msdu = false;
330 
331 				dp_rx_mon_dest_debug("%pK: msdu_ppdu_id=%x",
332 						     soc, msdu_ppdu_id);
333 
334 				if (*ppdu_id > msdu_ppdu_id)
335 					dp_rx_mon_dest_debug("%pK: ppdu_id=%d "
336 							     "msdu_ppdu_id=%d", soc,
337 							     *ppdu_id, msdu_ppdu_id);
338 
339 				if ((*ppdu_id < msdu_ppdu_id) && (
340 					(msdu_ppdu_id - *ppdu_id) <
341 						NOT_PPDU_ID_WRAP_AROUND)) {
342 					*ppdu_id = msdu_ppdu_id;
343 					return rx_bufs_used;
344 				} else if ((*ppdu_id > msdu_ppdu_id) && (
345 					(*ppdu_id - msdu_ppdu_id) >
346 						NOT_PPDU_ID_WRAP_AROUND)) {
347 					*ppdu_id = msdu_ppdu_id;
348 					return rx_bufs_used;
349 				}
350 
351 				dp_tx_capture_get_user_id(dp_pdev,
352 							  rx_desc_tlv);
353 
354 				if (*ppdu_id == msdu_ppdu_id)
355 					mon_pdev->rx_mon_stats.ppdu_id_match++;
356 				else
357 					mon_pdev->rx_mon_stats.ppdu_id_mismatch
358 						++;
359 
360 				mon_pdev->mon_last_linkdesc_paddr =
361 					buf_info.paddr;
362 
363 				if (dp_rx_mon_alloc_parent_buffer(head_msdu)
364 				    != QDF_STATUS_SUCCESS) {
365 					DP_STATS_INC(dp_pdev,
366 						     replenish.nbuf_alloc_fail,
367 						     1);
368 					qdf_frag_free(rx_desc_tlv);
369 					dp_rx_mon_dest_debug("failed to allocate parent buffer to hold all frag");
370 					drop_mpdu = true;
371 					goto next_msdu;
372 				}
373 			}
374 
375 			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
376 						      rx_desc_tlv))
377 				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
378 					rx_desc_tlv,
379 					&mon_pdev->ppdu_info.rx_status);
380 
381 			dp_rx_mon_parse_desc_buffer(soc,
382 						    &(msdu_list.msdu_info[i]),
383 						    &is_frag,
384 						    &total_frag_len,
385 						    &frag_len,
386 						    &l2_hdr_offset,
387 						    rx_desc_tlv,
388 						    &first_rx_desc_tlv,
389 						    &is_frag_non_raw, data);
390 			if (!is_frag && msdu_cnt)
391 				msdu_cnt--;
392 
393 			dp_rx_mon_dest_debug("total_len %u frag_len %u flags %u",
394 					     total_frag_len, frag_len,
395 				      msdu_list.msdu_info[i].msdu_flags);
396 
397 			rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);
398 
399 			rx_buf_size = rx_pkt_offset + l2_hdr_offset
400 					+ frag_len;
401 
402 			dp_rx_mon_buffer_set_pktlen(msdu, rx_buf_size);
403 #if 0
404 			/* Disabled: packets were seen with msdu_done set to 0 */
405 			/*
406 			 * Check if DMA completed -- msdu_done is the
407 			 * last bit to be written
408 			 */
409 			if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) {
410 
411 				QDF_TRACE(QDF_MODULE_ID_DP,
412 					  QDF_TRACE_LEVEL_ERROR,
413 					  "%s:%d: Pkt Desc",
414 					  __func__, __LINE__);
415 
416 				QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
417 					QDF_TRACE_LEVEL_ERROR,
418 					rx_desc_tlv, 128);
419 
420 				qdf_assert_always(0);
421 			}
422 #endif
423 			dp_rx_mon_dest_debug("%pK: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, frag_len %u",
424 					     soc, rx_pkt_offset, l2_hdr_offset,
425 					     msdu_list.msdu_info[i].msdu_len,
426 					     frag_len);
427 
428 			if (dp_rx_mon_add_msdu_to_list(soc, head_msdu, msdu,
429 						       &last, rx_desc_tlv,
430 						       frag_len, l2_hdr_offset)
431 					!= QDF_STATUS_SUCCESS) {
432 				dp_rx_mon_add_msdu_to_list_failure_handler(rx_desc_tlv,
433 						dp_pdev, &last, head_msdu,
434 						tail_msdu, __func__);
435 				drop_mpdu = true;
436 				goto next_msdu;
437 			}
438 
439 next_msdu:
440 			mon_pdev->mon_last_buf_cookie = msdu_list.sw_cookie[i];
441 			rx_bufs_used++;
442 			dp_rx_add_to_free_desc_list(head,
443 				tail, rx_desc);
444 		}
445 
446 		/*
447 		 * Store the current link buffer into the local
448 		 * structure to be used for release purposes.
449 		 */
450 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
451 					     buf_info.paddr,
452 					     buf_info.sw_cookie, buf_info.rbm);
453 
454 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
455 					      &buf_info);
456 		if (dp_rx_monitor_link_desc_return(dp_pdev,
457 						   (hal_buff_addrinfo_t)
458 						   rx_link_buf_info,
459 						   mac_id,
460 						   bm_action)
461 						   != QDF_STATUS_SUCCESS)
462 			dp_err_rl("monitor link desc return failed");
463 	} while (buf_info.paddr);
464 
465 	dp_rx_mon_init_tail_msdu(head_msdu, msdu, last, tail_msdu);
466 	dp_rx_mon_remove_raw_frame_fcs_len(soc, head_msdu, tail_msdu);
467 
468 	return rx_bufs_used;
469 }
470 
471 #if !defined(DISABLE_MON_CONFIG) && \
472 	(defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC) || \
473 	 defined(MON_ENABLE_DROP_FOR_MAC))
474 /**
475  * dp_rx_mon_drop_one_mpdu() - Drop one mpdu from one rxdma monitor destination
476  *			       ring.
477  * @pdev: DP pdev handle
478  * @mac_id: MAC id which is being currently processed
479  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
480  * @head: HEAD of the rx_desc list to be freed
481  * @tail: TAIL of the rx_desc list to be freed
482  *
483  * Return: Number of msdus which are dropped.
484  */
485 static int dp_rx_mon_drop_one_mpdu(struct dp_pdev *pdev,
486 				   uint32_t mac_id,
487 				   hal_rxdma_desc_t rxdma_dst_ring_desc,
488 				   union dp_rx_desc_list_elem_t **head,
489 				   union dp_rx_desc_list_elem_t **tail)
490 {
491 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
492 	struct dp_soc *soc = pdev->soc;
493 	hal_soc_handle_t hal_soc = soc->hal_soc;
494 	struct hal_buf_info buf_info;
495 	uint32_t msdu_count = 0;
496 	uint32_t rx_bufs_used = 0;
497 	void *rx_msdu_link_desc;
498 	struct hal_rx_msdu_list msdu_list;
499 	uint16_t num_msdus;
500 	qdf_nbuf_t nbuf;
501 	uint32_t i;
502 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
503 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
504 	struct rx_desc_pool *rx_desc_pool;
505 
506 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
507 	hal_rx_reo_ent_buf_paddr_get(hal_soc, rxdma_dst_ring_desc,
508 				     &buf_info, &msdu_count);
509 
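	/*
	 * Walk the MPDU's link descriptor chain: free every MSDU buffer it
	 * references, move the SW rx descriptors to the free list and return
	 * each link descriptor to WBM. Stop once the chain ends or all MSDUs
	 * of the MPDU have been accounted for.
	 */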
510 	do {
511 		rx_msdu_link_desc = dp_rx_cookie_2_mon_link_desc(pdev,
512 								 &buf_info,
513 								 mac_id);
514 		if (qdf_unlikely(!rx_msdu_link_desc)) {
515 			mon_pdev->rx_mon_stats.mon_link_desc_invalid++;
516 			return rx_bufs_used;
517 		}
518 
519 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
520 				     &msdu_list, &num_msdus);
521 
522 		for (i = 0; i < num_msdus; i++) {
523 			struct dp_rx_desc *rx_desc;
524 			qdf_dma_addr_t buf_paddr;
525 
526 			rx_desc = dp_rx_get_mon_desc(soc,
527 						     msdu_list.sw_cookie[i]);
528 
529 			if (qdf_unlikely(!rx_desc)) {
530 				mon_pdev->rx_mon_stats.
531 						mon_rx_desc_invalid++;
532 				continue;
533 			}
534 
535 			nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
536 			buf_paddr =
537 				 dp_rx_mon_get_paddr_from_desc(rx_desc);
538 
539 			if (qdf_unlikely(!rx_desc->in_use || !nbuf ||
540 					 msdu_list.paddr[i] !=
541 					 buf_paddr)) {
542 				mon_pdev->rx_mon_stats.
543 						mon_nbuf_sanity_err++;
544 				continue;
545 			}
546 			rx_bufs_used++;
547 
548 			if (!rx_desc->unmapped) {
549 				dp_rx_mon_buffer_unmap(soc, rx_desc,
550 						       rx_desc_pool->buf_size);
551 				rx_desc->unmapped = 1;
552 			}
553 
554 			qdf_nbuf_free(nbuf);
555 			dp_rx_add_to_free_desc_list(head, tail, rx_desc);
556 
557 			if (!(msdu_list.msdu_info[i].msdu_flags &
558 			      HAL_MSDU_F_MSDU_CONTINUATION))
559 				msdu_count--;
560 		}
561 
562 		/*
563 		 * Store the current link buffer into the local
564 		 * structure to be used for release purposes.
565 		 */
566 		hal_rxdma_buff_addr_info_set(soc->hal_soc,
567 					     rx_link_buf_info,
568 					     buf_info.paddr,
569 					     buf_info.sw_cookie,
570 					     buf_info.rbm);
571 
572 		hal_rx_mon_next_link_desc_get(soc->hal_soc,
573 					      rx_msdu_link_desc,
574 					      &buf_info);
575 		if (dp_rx_monitor_link_desc_return(pdev,
576 						   (hal_buff_addrinfo_t)
577 						   rx_link_buf_info,
578 						   mac_id, bm_action) !=
579 		    QDF_STATUS_SUCCESS)
580 			dp_info_rl("monitor link desc return failed");
581 	} while (buf_info.paddr && msdu_count);
582 
583 	return rx_bufs_used;
584 }
585 #endif
586 
587 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
588 /**
589  * dp_rx_mon_check_n_drop_mpdu() - Check whether the current MPDU is from a
590  *				   PMAC other than the one currently being
591  *				   processed, and if so, drop the MPDU.
592  * @pdev: DP pdev handle
593  * @mac_id: MAC id which is being currently processed
594  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
595  * @head: HEAD of the rx_desc list to be freed
596  * @tail: TAIL of the rx_desc list to be freed
597  * @rx_bufs_dropped: Number of msdus dropped
598  *
599  * Return: QDF_STATUS_SUCCESS, if the mpdu was to be dropped
600  *	   QDF_STATUS_E_INVAL/QDF_STATUS_E_FAILURE, if the mpdu was not dropped
601  */
602 static QDF_STATUS
603 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
604 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
605 			    union dp_rx_desc_list_elem_t **head,
606 			    union dp_rx_desc_list_elem_t **tail,
607 			    uint32_t *rx_bufs_dropped)
608 {
609 	struct dp_soc *soc = pdev->soc;
610 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
611 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
612 	uint8_t src_link_id;
613 	QDF_STATUS status;
614 
615 	if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN)
616 		goto drop_mpdu;
617 
618 	lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
619 
620 	status = hal_rx_reo_ent_get_src_link_id(soc->hal_soc,
621 						rxdma_dst_ring_desc,
622 						&src_link_id);
623 	if (QDF_IS_STATUS_ERROR(status))
624 		return QDF_STATUS_E_INVAL;
625 
626 	if (src_link_id == lmac_id)
627 		return QDF_STATUS_E_INVAL;
628 
629 drop_mpdu:
630 	*rx_bufs_dropped = dp_rx_mon_drop_one_mpdu(pdev, mac_id,
631 						   rxdma_dst_ring_desc,
632 						   head, tail);
633 
634 	return QDF_STATUS_SUCCESS;
635 }
636 #else
637 static inline QDF_STATUS
638 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
639 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
640 			    union dp_rx_desc_list_elem_t **head,
641 			    union dp_rx_desc_list_elem_t **tail,
642 			    uint32_t *rx_bufs_dropped)
643 {
644 	return QDF_STATUS_E_FAILURE;
645 }
646 #endif
647 
648 void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
649 			    uint32_t mac_id, uint32_t quota)
650 {
651 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
652 	uint8_t pdev_id;
653 	hal_rxdma_desc_t rxdma_dst_ring_desc;
654 	hal_soc_handle_t hal_soc;
655 	void *mon_dst_srng;
656 	union dp_rx_desc_list_elem_t *head = NULL;
657 	union dp_rx_desc_list_elem_t *tail = NULL;
658 	uint32_t ppdu_id;
659 	uint32_t rx_bufs_used;
660 	uint32_t mpdu_rx_bufs_used;
661 	int mac_for_pdev = mac_id;
662 	struct cdp_pdev_mon_stats *rx_mon_stats;
663 	struct dp_mon_pdev *mon_pdev;
664 
665 	if (!pdev) {
666 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
667 		return;
668 	}
669 
670 	mon_pdev = pdev->monitor_pdev;
671 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);
672 
673 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
674 		dp_rx_mon_dest_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
675 				   soc, mon_dst_srng);
676 		return;
677 	}
678 
679 	hal_soc = soc->hal_soc;
680 
681 	qdf_assert((hal_soc && pdev));
682 
683 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
684 
685 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
686 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
687 			  "%s %d : HAL Mon Dest Ring access Failed -- %pK",
688 			  __func__, __LINE__, mon_dst_srng);
689 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
690 		return;
691 	}
692 
693 	pdev_id = pdev->pdev_id;
694 	ppdu_id = mon_pdev->ppdu_info.com_info.ppdu_id;
695 	rx_bufs_used = 0;
696 	rx_mon_stats = &mon_pdev->rx_mon_stats;
697 
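	/*
	 * Reap destination ring entries while the destination ring PPDU id
	 * matches the PPDU id last reported by the status ring. On a mismatch
	 * both ids are recorded in the PPDU id history and the loop breaks.
	 */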
698 	while (qdf_likely(rxdma_dst_ring_desc =
699 		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
700 		qdf_nbuf_t head_msdu, tail_msdu;
701 		uint32_t npackets;
702 		uint32_t rx_bufs_dropped;
703 
704 		rx_bufs_dropped = 0;
705 		head_msdu = (qdf_nbuf_t)NULL;
706 		tail_msdu = (qdf_nbuf_t)NULL;
707 
708 		if (QDF_STATUS_SUCCESS ==
709 		    dp_rx_mon_check_n_drop_mpdu(pdev, mac_id,
710 						rxdma_dst_ring_desc,
711 						&head, &tail,
712 						&rx_bufs_dropped)) {
713 			/* Increment stats */
714 			rx_bufs_used += rx_bufs_dropped;
715 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
716 			continue;
717 		}
718 
719 		mpdu_rx_bufs_used =
720 			dp_rx_mon_mpdu_pop(soc, mac_id,
721 					   rxdma_dst_ring_desc,
722 					   &head_msdu, &tail_msdu,
723 					   &npackets, &ppdu_id,
724 					   &head, &tail);
725 
726 		rx_bufs_used += mpdu_rx_bufs_used;
727 
728 		if (mpdu_rx_bufs_used)
729 			mon_pdev->mon_dest_ring_stuck_cnt = 0;
730 		else
731 			mon_pdev->mon_dest_ring_stuck_cnt++;
732 
733 		if (mon_pdev->mon_dest_ring_stuck_cnt >
734 		    MON_DEST_RING_STUCK_MAX_CNT) {
735 			dp_info("destination ring stuck");
736 			dp_info("ppdu_id status=%d dest=%d",
737 				mon_pdev->ppdu_info.com_info.ppdu_id, ppdu_id);
738 			rx_mon_stats->mon_rx_dest_stuck++;
739 			mon_pdev->ppdu_info.com_info.ppdu_id = ppdu_id;
740 			continue;
741 		}
742 
743 		if (ppdu_id != mon_pdev->ppdu_info.com_info.ppdu_id) {
744 			rx_mon_stats->stat_ring_ppdu_id_hist[
745 				rx_mon_stats->ppdu_id_hist_idx] =
746 				mon_pdev->ppdu_info.com_info.ppdu_id;
747 			rx_mon_stats->dest_ring_ppdu_id_hist[
748 				rx_mon_stats->ppdu_id_hist_idx] = ppdu_id;
749 			rx_mon_stats->ppdu_id_hist_idx =
750 				(rx_mon_stats->ppdu_id_hist_idx + 1) &
751 					(MAX_PPDU_ID_HIST - 1);
752 			dp_rx_mon_dest_debug("%pK: ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
753 					     soc, ppdu_id,
754 					     mon_pdev->ppdu_info.com_info.ppdu_id);
755 			break;
756 		}
757 
758 		if (qdf_likely((head_msdu) && (tail_msdu))) {
759 			rx_mon_stats->dest_mpdu_done++;
760 			dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu);
761 		}
762 
763 		rxdma_dst_ring_desc =
764 			hal_srng_dst_get_next(hal_soc,
765 					      mon_dst_srng);
766 	}
767 
768 	dp_srng_access_end(int_ctx, soc, mon_dst_srng);
769 
770 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
771 
772 	if (rx_bufs_used) {
773 		rx_mon_stats->dest_ppdu_done++;
774 		dp_rx_buffers_replenish(soc, mac_id,
775 					dp_rxdma_get_mon_buf_ring(pdev,
776 								  mac_for_pdev),
777 					dp_rx_get_mon_desc_pool(soc, mac_id,
778 								pdev_id),
779 					rx_bufs_used, &head, &tail, false);
780 	}
781 }
782 
783 QDF_STATUS
784 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
785 				 bool delayed_replenish)
786 {
787 	uint8_t pdev_id = pdev->pdev_id;
788 	struct dp_soc *soc = pdev->soc;
789 	struct dp_srng *mon_buf_ring;
790 	uint32_t num_entries;
791 	struct rx_desc_pool *rx_desc_pool;
792 	QDF_STATUS status = QDF_STATUS_SUCCESS;
793 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
794 
795 	mon_buf_ring = dp_rxdma_get_mon_buf_ring(pdev, mac_id);
796 
797 	num_entries = mon_buf_ring->num_entries;
798 
799 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev_id);
800 
801 	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);
802 
803 	/* Replenish the RXDMA monitor buffer ring with 8 buffers only.
804 	 * delayed_replenish_entries is actually 8, but when we call
805 	 * dp_pdev_rx_buffers_attach() we pass 1 less than that, hence
806 	 * 1 is added to delayed_replenish_entries to ensure we end up
807 	 * with 8 entries. Once the monitor VAP is configured, we replenish
808 	 * the complete RXDMA monitor buffer ring.
809 	 */
810 	if (delayed_replenish) {
811 		num_entries = soc_cfg_ctx->delayed_replenish_entries + 1;
812 		status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
813 						   rx_desc_pool,
814 						   num_entries - 1);
815 	} else {
816 		union dp_rx_desc_list_elem_t *tail = NULL;
817 		union dp_rx_desc_list_elem_t *desc_list = NULL;
818 
819 		status = dp_rx_buffers_replenish(soc, mac_id,
820 						 mon_buf_ring,
821 						 rx_desc_pool,
822 						 num_entries,
823 						 &desc_list,
824 						 &tail, false);
825 	}
826 
827 	return status;
828 }
829 
830 void
831 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
832 {
833 	uint8_t pdev_id = pdev->pdev_id;
834 	struct dp_soc *soc = pdev->soc;
835 	struct dp_srng *mon_buf_ring;
836 	uint32_t num_entries;
837 	struct rx_desc_pool *rx_desc_pool;
838 	uint32_t rx_desc_pool_size;
839 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
840 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
841 
842 	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
843 
844 	num_entries = mon_buf_ring->num_entries;
845 
846 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
847 
848 	/* If descriptor pool is already initialized, do not initialize it */
849 	if (rx_desc_pool->freelist)
850 		return;
851 
852 	dp_debug("Mon RX Desc buf Pool[%d] init entries=%u",
853 		 pdev_id, num_entries);
854 
855 	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
856 		num_entries;
857 
858 	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
859 	rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
860 	rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
861 	/* Enable frag processing if feature is enabled */
862 	dp_rx_enable_mon_dest_frag(rx_desc_pool, true);
863 
864 	dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);
865 
866 	mon_pdev->mon_last_linkdesc_paddr = 0;
867 
868 	mon_pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
869 
870 	/* Attach full monitor mode resources */
871 	dp_full_mon_attach(pdev);
872 }
873 
874 static void
875 dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
876 {
877 	uint8_t pdev_id = pdev->pdev_id;
878 	struct dp_soc *soc = pdev->soc;
879 	struct rx_desc_pool *rx_desc_pool;
880 
881 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
882 
883 	dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id);
884 
885 	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
886 
887 	/* Detach full monitor mode resources */
888 	dp_full_mon_detach(pdev);
889 }
890 
891 static void
892 dp_rx_pdev_mon_buf_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
893 {
894 	uint8_t pdev_id = pdev->pdev_id;
895 	struct dp_soc *soc = pdev->soc;
896 	struct rx_desc_pool *rx_desc_pool;
897 
898 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
899 
900 	dp_debug("Mon RX Buf Desc Pool Free pdev[%d]", pdev_id);
901 
902 	dp_rx_desc_pool_free(soc, rx_desc_pool);
903 }
904 
905 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
906 {
907 	uint8_t pdev_id = pdev->pdev_id;
908 	struct dp_soc *soc = pdev->soc;
909 	struct rx_desc_pool *rx_desc_pool;
910 
911 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
912 
913 	dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id);
914 
915 	if (rx_desc_pool->rx_mon_dest_frag_enable)
916 		dp_rx_desc_frag_free(soc, rx_desc_pool);
917 	else
918 		dp_rx_desc_nbuf_free(soc, rx_desc_pool, true);
919 }
920 
921 QDF_STATUS
922 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
923 {
924 	uint8_t pdev_id = pdev->pdev_id;
925 	struct dp_soc *soc = pdev->soc;
926 	struct dp_srng *mon_buf_ring;
927 	uint32_t num_entries;
928 	struct rx_desc_pool *rx_desc_pool;
929 	uint32_t rx_desc_pool_size;
930 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
931 
932 	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
933 
934 	num_entries = mon_buf_ring->num_entries;
935 
936 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
937 
938 	dp_debug("Mon RX Desc Pool[%d] entries=%u",
939 		 pdev_id, num_entries);
940 
941 	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
942 		num_entries;
943 
944 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_SUCCESS)
945 		return QDF_STATUS_SUCCESS;
946 
947 	return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool);
948 }
949 
950 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
951 uint32_t
952 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
953 			      bool force_flush)
954 {
955 	struct dp_soc *soc = pdev->soc;
956 	hal_rxdma_desc_t rxdma_dst_ring_desc;
957 	hal_soc_handle_t hal_soc;
958 	void *mon_dst_srng;
959 	union dp_rx_desc_list_elem_t *head = NULL;
960 	union dp_rx_desc_list_elem_t *tail = NULL;
961 	uint32_t rx_bufs_used = 0;
962 	struct rx_desc_pool *rx_desc_pool;
963 	uint32_t reap_cnt = 0;
964 	uint32_t rx_bufs_dropped;
965 	struct dp_mon_pdev *mon_pdev;
966 	bool is_rxdma_dst_ring_common;
967 
968 	if (qdf_unlikely(!soc || !soc->hal_soc))
969 		return reap_cnt;
970 
971 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);
972 
973 	if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
974 		return reap_cnt;
975 
976 	hal_soc = soc->hal_soc;
977 	mon_pdev = pdev->monitor_pdev;
978 
979 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
980 
981 	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
982 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
983 		return reap_cnt;
984 	}
985 
986 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
987 	is_rxdma_dst_ring_common = dp_is_rxdma_dst_ring_common(pdev);
988 
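	/*
	 * Drop MPDUs from the destination ring, at most MON_DROP_REAP_LIMIT
	 * entries per call unless force_flush is set. On a common destination
	 * ring without force_flush, only MPDUs that did not originate from
	 * this PMAC are dropped; otherwise every reaped MPDU is dropped.
	 */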
989 	while ((rxdma_dst_ring_desc =
990 		hal_srng_dst_peek(hal_soc, mon_dst_srng)) &&
991 		(reap_cnt < MON_DROP_REAP_LIMIT || force_flush)) {
992 		if (is_rxdma_dst_ring_common && !force_flush) {
993 			if (QDF_STATUS_SUCCESS ==
994 			    dp_rx_mon_check_n_drop_mpdu(pdev, mac_id,
995 							rxdma_dst_ring_desc,
996 							&head, &tail,
997 							&rx_bufs_dropped)) {
998 				/* Increment stats */
999 				rx_bufs_used += rx_bufs_dropped;
1000 			} else {
1001 				/*
1002 				 * If the mpdu was not dropped, we need to
1003 				 * wait for the entry to be processed, along
1004 				 * with the status ring entry for the other
1005 				 * mac. Hence we bail out here.
1006 				 */
1007 				break;
1008 			}
1009 		} else {
1010 			rx_bufs_used += dp_rx_mon_drop_one_mpdu(pdev, mac_id,
1011 								rxdma_dst_ring_desc,
1012 								&head, &tail);
1013 		}
1014 		reap_cnt++;
1015 		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
1016 							    mon_dst_srng);
1017 	}
1018 
1019 	hal_srng_access_end(hal_soc, mon_dst_srng);
1020 
1021 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1022 
1023 	if (rx_bufs_used) {
1024 		dp_rx_buffers_replenish(soc, mac_id,
1025 					dp_rxdma_get_mon_buf_ring(pdev, mac_id),
1026 					rx_desc_pool,
1027 					rx_bufs_used, &head, &tail, false);
1028 	}
1029 
1030 	return reap_cnt;
1031 }
1032 #else
1033 #if defined(QCA_SUPPORT_FULL_MON) && defined(WIFI_MONITOR_SUPPORT)
1034 uint32_t
1035 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
1036 {
1037 	struct dp_soc *soc = pdev->soc;
1038 	hal_rxdma_desc_t rxdma_dst_ring_desc;
1039 	hal_soc_handle_t hal_soc;
1040 	void *mon_dst_srng;
1041 	union dp_rx_desc_list_elem_t *head = NULL;
1042 	union dp_rx_desc_list_elem_t *tail = NULL;
1043 	uint32_t rx_bufs_used = 0;
1044 	void *rx_msdu_link_desc;
1045 	uint16_t num_msdus;
1046 	struct hal_rx_msdu_list msdu_list;
1047 	qdf_nbuf_t nbuf = NULL;
1048 	uint32_t i;
1049 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
1050 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
1051 	struct rx_desc_pool *rx_desc_pool = NULL;
1052 	uint32_t reap_cnt = 0;
1053 	struct dp_mon_pdev *mon_pdev;
1054 	struct hal_rx_mon_desc_info *desc_info;
1055 
1056 	if (qdf_unlikely(!soc || !soc->hal_soc))
1057 		return reap_cnt;
1058 
1059 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);
1060 
1061 	if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
1062 		return reap_cnt;
1063 
1064 	hal_soc = soc->hal_soc;
1065 	mon_pdev = pdev->monitor_pdev;
1066 	desc_info = mon_pdev->mon_desc;
1067 
1068 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
1069 
1070 	while ((rxdma_dst_ring_desc =
1071 		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
1072 		qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));
1073 		hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
1074 					    (void *)rxdma_dst_ring_desc,
1075 					    (void *)desc_info);
1076 
1077 		if (desc_info->end_of_ppdu) {
1078 			rxdma_dst_ring_desc =
1079 				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1080 			continue;
1081 		}
1082 
1083 		do {
1084 			rx_msdu_link_desc =
1085 				dp_rx_cookie_2_mon_link_desc(pdev,
1086 							     &desc_info->
1087 							     link_desc,
1088 							     mac_id);
1089 
1090 			if (qdf_unlikely(!rx_msdu_link_desc)) {
1091 				mon_pdev->rx_mon_stats.mon_link_desc_invalid++;
1092 				goto next_entry;
1093 			}
1094 
1095 			hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
1096 					     &msdu_list, &num_msdus);
1097 
1098 			for (i = 0; i < num_msdus; i++) {
1099 				struct dp_rx_desc *rx_desc;
1100 				qdf_dma_addr_t buf_paddr;
1101 
1102 				rx_desc =
1103 					dp_rx_get_mon_desc(soc, msdu_list.
1104 							   sw_cookie[i]);
1105 
1106 				if (qdf_unlikely(!rx_desc)) {
1107 					mon_pdev->rx_mon_stats.
1108 							mon_rx_desc_invalid++;
1109 					continue;
1110 				}
1111 
1112 				nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
1113 				buf_paddr =
1114 					 dp_rx_mon_get_paddr_from_desc(rx_desc);
1115 
1116 				if (qdf_unlikely(!rx_desc->in_use || !nbuf ||
1117 						 msdu_list.paddr[i] !=
1118 						 buf_paddr)) {
1119 					mon_pdev->rx_mon_stats.
1120 							mon_nbuf_sanity_err++;
1121 					continue;
1122 				}
1123 				rx_bufs_used++;
1124 
1125 				if (!rx_desc->unmapped) {
1126 					dp_rx_mon_buffer_unmap(soc, rx_desc,
1127 							       rx_desc_pool->
1128 							       buf_size);
1129 					rx_desc->unmapped = 1;
1130 				}
1131 
1132 				dp_rx_mon_buffer_free(rx_desc);
1133 				dp_rx_add_to_free_desc_list(&head, &tail,
1134 							    rx_desc);
1135 
1136 				if (!(msdu_list.msdu_info[i].msdu_flags &
1137 				      HAL_MSDU_F_MSDU_CONTINUATION))
1138 					desc_info->msdu_count--;
1139 			}
1140 
1141 			/*
1142 			 * Store the current link buffer into the local
1143 			 * structure to be used for release purposes.
1144 			 */
1145 			hal_rxdma_buff_addr_info_set(soc->hal_soc,
1146 						     rx_link_buf_info,
1147 						     desc_info->link_desc.paddr,
1148 						     desc_info->link_desc.
1149 						     sw_cookie,
1150 						     desc_info->link_desc.rbm);
1151 
1152 			hal_rx_mon_next_link_desc_get(soc->hal_soc,
1153 						      rx_msdu_link_desc,
1154 						      &desc_info->link_desc);
1155 			if (dp_rx_monitor_link_desc_return(pdev,
1156 							   (hal_buff_addrinfo_t)
1157 							   rx_link_buf_info,
1158 							   mac_id, bm_action) !=
1159 			    QDF_STATUS_SUCCESS)
1160 				dp_info_rl("monitor link desc return failed");
1161 		} while (desc_info->link_desc.paddr);
1162 
1163 next_entry:
1164 		reap_cnt++;
1165 		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
1166 							    mon_dst_srng);
1167 	}
1168 
1169 	if (rx_bufs_used) {
1170 		dp_rx_buffers_replenish(soc, mac_id,
1171 					dp_rxdma_get_mon_buf_ring(pdev, mac_id),
1172 					rx_desc_pool,
1173 					rx_bufs_used, &head, &tail, false);
1174 	}
1175 
1176 	return reap_cnt;
1177 }
1178 #else
1179 uint32_t
1180 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
1181 {
1182 	return 0;
1183 }
1184 #endif
1185 #endif
1186 
1187 static void
1188 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
1189 {
1190 	struct dp_soc *soc = pdev->soc;
1191 
1192 	dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
1193 	dp_hw_link_desc_pool_banks_free(soc, mac_for_pdev);
1194 }
1195 
1196 static void
1197 dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
1198 {
1199 	struct dp_soc *soc = pdev->soc;
1200 
1201 	if (!soc->wlan_cfg_ctx->rxdma1_enable)
1202 		return;
1203 
1204 	dp_rx_pdev_mon_buf_desc_pool_deinit(pdev, mac_for_pdev);
1205 }
1206 
1207 static void
1208 dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1209 {
1210 	struct dp_soc *soc = pdev->soc;
1211 
1212 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1213 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1214 		return;
1215 
1216 	dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
1217 	dp_link_desc_ring_replenish(soc, mac_for_pdev);
1218 }
1219 
1220 static void
1221 dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
1222 {
1223 	struct dp_soc *soc = pdev->soc;
1224 
1225 	if (!soc->wlan_cfg_ctx->rxdma1_enable)
1226 		return;
1227 
1228 	dp_rx_pdev_mon_buf_buffers_free(pdev, mac_for_pdev);
1229 }
1230 
1231 static QDF_STATUS
1232 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev)
1233 {
1234 	struct dp_soc *soc = pdev->soc;
1235 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
1236 	bool delayed_replenish;
1237 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1238 
1239 	delayed_replenish = soc_cfg_ctx->delayed_replenish_entries ? 1 : 0;
1240 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1241 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1242 		return status;
1243 
1244 	status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
1245 						  delayed_replenish);
1246 	if (!QDF_IS_STATUS_SUCCESS(status))
1247 		dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed");
1248 
1249 	return status;
1250 }
1251 
1252 static QDF_STATUS
1253 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1254 {
1255 	struct dp_soc *soc = pdev->soc;
1256 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1257 
1258 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1259 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1260 		return status;
1261 
1262 	/* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */
1263 	status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
1264 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1265 		dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed");
1266 		goto fail;
1267 	}
1268 
1269 	/* Allocate link descriptors for the monitor link descriptor ring */
1270 	status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
1271 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1272 		dp_err("dp_hw_link_desc_pool_banks_alloc() failed");
1273 		goto mon_buf_dealloc;
1274 	}
1275 
1276 	return status;
1277 
1278 mon_buf_dealloc:
1279 	dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
1280 fail:
1281 	return status;
1282 }
1283 #else
1284 static void
1285 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
1286 {
1287 }
1288 
1289 static void
1290 dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
1291 {
1292 }
1293 
1294 static void
1295 dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1296 {
1297 }
1298 
1299 static void
1300 dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
1301 {
1302 }
1303 
1304 static QDF_STATUS
1305 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev)
1306 {
1307 	return QDF_STATUS_SUCCESS;
1308 }
1309 
1310 static QDF_STATUS
1311 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1312 {
1313 	return QDF_STATUS_SUCCESS;
1314 }
1315 
1316 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
1317 uint32_t
1318 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
1319 {
1320 	return 0;
1321 }
1322 #endif
1323 
1324 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
1325 static QDF_STATUS
1326 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
1327 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
1328 			    union dp_rx_desc_list_elem_t **head,
1329 			    union dp_rx_desc_list_elem_t **tail,
1330 			    uint32_t *rx_bufs_dropped)
1331 {
1332 	return QDF_STATUS_E_FAILURE;
1333 }
1334 #endif
1335 #endif
1336 
1337 #ifdef WLAN_SOFTUMAC_SUPPORT
1338 static void dp_mon_hw_link_desc_bank_free(struct dp_soc *soc, uint32_t mac_id)
1339 {
1340 	struct qdf_mem_multi_page_t *pages;
1341 
1342 	pages = dp_monitor_get_link_desc_pages(soc, mac_id);
1343 	if (!pages) {
1344 		dp_err("can not get mon link desc pages");
1345 		QDF_ASSERT(0);
1346 		return;
1347 	}
1348 
1349 	if (pages->dma_pages) {
1350 		wlan_minidump_remove((void *)
1351 				     pages->dma_pages->page_v_addr_start,
1352 				     pages->num_pages * pages->page_size,
1353 				     soc->ctrl_psoc,
1354 				     WLAN_MD_DP_SRNG_SW2RXDMA_LINK_RING,
1355 				     "mon hw_link_desc_bank");
1356 		dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
1357 					     pages, 0, false);
1358 	}
1359 }
1360 
1361 static QDF_STATUS
1362 dp_mon_hw_link_desc_bank_alloc(struct dp_soc *soc, uint32_t mac_id)
1363 {
1364 	struct qdf_mem_multi_page_t *pages;
1365 	uint32_t *total_link_descs, total_mem_size;
1366 	uint32_t num_entries;
1367 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1368 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1369 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1370 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
1371 
1372 	pages = dp_monitor_get_link_desc_pages(soc, mac_id);
1373 	if (!pages) {
1374 		dp_err("can not get mon link desc pages");
1375 		QDF_ASSERT(0);
1376 		return QDF_STATUS_E_FAULT;
1377 	}
1378 
1379 	/* If link descriptor banks are allocated, return from here */
1380 	if (pages->num_pages)
1381 		return QDF_STATUS_SUCCESS;
1382 
1383 	num_entries = dp_monitor_get_num_link_desc_ring_entries(soc, mac_id);
1384 	total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
1385 	qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
1386 		      MINIDUMP_STR_SIZE);
1387 
1388 	/* Round up to power of 2 */
1389 	*total_link_descs = 1;
1390 	while (*total_link_descs < num_entries)
1391 		*total_link_descs <<= 1;
1392 
1393 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
1394 		     soc, *total_link_descs, link_desc_size);
1395 
1396 	total_mem_size =  *total_link_descs * link_desc_size;
1397 	total_mem_size += link_desc_align;
1398 
1399 	dp_init_info("%pK: total_mem_size: %d", soc, total_mem_size);
1400 
1401 	dp_set_max_page_size(pages, max_alloc_size);
1402 	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
1403 				      pages, link_desc_size,
1404 				      *total_link_descs, 0, false);
1405 
1406 	if (!pages->num_pages) {
1407 		dp_err("Multi page alloc fail for mon hw link desc pool");
1408 		return QDF_STATUS_E_FAULT;
1409 	}
1410 
1411 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
1412 			  pages->num_pages * pages->page_size,
1413 			  soc->ctrl_psoc,
1414 			  WLAN_MD_DP_SRNG_SW2RXDMA_LINK_RING,
1415 			  "mon hw_link_desc_bank");
1416 
1417 	return QDF_STATUS_SUCCESS;
1418 }
1419 
1420 static void
1421 dp_mon_link_desc_ring_replenish(struct dp_soc *soc, int mac_id)
1422 {
1423 	dp_link_desc_ring_replenish(soc, mac_id);
1424 }
1425 #else
1426 static QDF_STATUS
1427 dp_mon_hw_link_desc_bank_alloc(struct dp_soc *soc, uint32_t mac_id)
1428 {
1429 	return QDF_STATUS_SUCCESS;
1430 }
1431 
1432 static void
1433 dp_mon_hw_link_desc_bank_free(struct dp_soc *soc, uint32_t mac_id) {}
1434 
1435 static void
1436 dp_mon_link_desc_ring_replenish(struct dp_soc *soc, int mac_id) {}
1437 #endif
1438 
1439 static void
1440 dp_rx_pdev_mon_cmn_desc_pool_free(struct dp_pdev *pdev, int mac_id)
1441 {
1442 	struct dp_soc *soc = pdev->soc;
1443 	uint8_t pdev_id = pdev->pdev_id;
1444 	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1445 
1446 	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
1447 	dp_mon_hw_link_desc_bank_free(soc, mac_for_pdev);
1448 	dp_rx_pdev_mon_dest_desc_pool_free(pdev, mac_for_pdev);
1449 }
1450 
1451 void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev)
1452 {
1453 	int mac_id;
1454 
1455 	for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++)
1456 		dp_rx_pdev_mon_cmn_desc_pool_free(pdev, mac_id);
1457 }
1458 
1459 #ifdef WLAN_FEATURE_LOCAL_PKT_CAPTURE
1460 static inline void
1461 dp_rx_lpc_lock_create(struct dp_mon_pdev *mon_pdev)
1462 {
1463 	qdf_spinlock_create(&mon_pdev->lpc_lock);
1464 }
1465 
1466 static inline void
1467 dp_rx_lpc_lock_destroy(struct dp_mon_pdev *mon_pdev)
1468 {
1469 	qdf_spinlock_destroy(&mon_pdev->lpc_lock);
1470 }
1471 #else
1472 static inline void
1473 dp_rx_lpc_lock_create(struct dp_mon_pdev *mon_pdev)
1474 {
1475 }
1476 
1477 static inline void
1478 dp_rx_lpc_lock_destroy(struct dp_mon_pdev *mon_pdev)
1479 {
1480 }
1481 #endif
1482 
1483 static void
1484 dp_rx_pdev_mon_cmn_desc_pool_deinit(struct dp_pdev *pdev, int mac_id)
1485 {
1486 	struct dp_soc *soc = pdev->soc;
1487 	uint8_t pdev_id = pdev->pdev_id;
1488 	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1489 
1490 	dp_rx_pdev_mon_status_desc_pool_deinit(pdev, mac_for_pdev);
1491 
1492 	dp_rx_pdev_mon_dest_desc_pool_deinit(pdev, mac_for_pdev);
1493 }
1494 
1495 void
1496 dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev)
1497 {
1498 	int mac_id;
1499 
1500 	for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++)
1501 		dp_rx_pdev_mon_cmn_desc_pool_deinit(pdev, mac_id);
1502 	qdf_spinlock_destroy(&pdev->monitor_pdev->mon_lock);
1503 	dp_rx_lpc_lock_destroy(pdev->monitor_pdev);
1504 }
1505 
1506 static void
1507 dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id)
1508 {
1509 	struct dp_soc *soc = pdev->soc;
1510 	uint32_t mac_for_pdev;
1511 
1512 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
1513 	dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev);
1514 	dp_mon_link_desc_ring_replenish(soc, mac_for_pdev);
1515 
1516 	dp_rx_pdev_mon_dest_desc_pool_init(pdev, mac_for_pdev);
1517 }
1518 
1519 void
1520 dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev)
1521 {
1522 	int mac_id;
1523 
1524 	for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++)
1525 		dp_rx_pdev_mon_cmn_desc_pool_init(pdev, mac_id);
1526 	qdf_spinlock_create(&pdev->monitor_pdev->mon_lock);
1527 	dp_rx_lpc_lock_create(pdev->monitor_pdev);
1528 }
1529 
1530 void
1531 dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev)
1532 {
1533 	int mac_id;
1534 	int mac_for_pdev;
1535 	uint8_t pdev_id = pdev->pdev_id;
1536 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = pdev->soc->wlan_cfg_ctx;
1537 
1538 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_status_rings_per_pdev;
1539 	     mac_id++) {
1540 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1541 							  pdev_id);
1542 		dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
1543 	}
1544 
1545 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_dst_rings_per_pdev;
1546 	     mac_id++) {
1547 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1548 							  pdev_id);
1549 		dp_rx_pdev_mon_dest_buffers_free(pdev, mac_for_pdev);
1550 	}
1551 	pdev->monitor_pdev->pdev_mon_init = 0;
1552 }
1553 
1554 QDF_STATUS
1555 dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev)
1556 {
1557 	int mac_id;
1558 	int mac_for_pdev;
1559 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1560 	uint8_t pdev_id = pdev->pdev_id;
1561 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = pdev->soc->wlan_cfg_ctx;
1562 
1563 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_status_rings_per_pdev;
1564 	     mac_id++) {
1565 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1566 							  pdev_id);
1567 		status = dp_rx_pdev_mon_status_buffers_alloc(pdev,
1568 							     mac_for_pdev);
1569 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1570 			dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed");
1571 			goto mon_status_buf_fail;
1572 		}
1573 	}
1574 
1575 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_dst_rings_per_pdev;
1576 	     mac_id++) {
1577 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1578 							  pdev_id);
1579 		status = dp_rx_pdev_mon_dest_buffers_alloc(pdev, mac_for_pdev);
1580 		if (!QDF_IS_STATUS_SUCCESS(status))
1581 			goto mon_stat_buf_dealloc;
1582 	}
1583 
1584 	return status;
1585 
1586 mon_stat_buf_dealloc:
1587 	dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
1588 mon_status_buf_fail:
1589 	return status;
1590 }
1591 
1592 static QDF_STATUS
1593 dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id)
1594 {
1595 	struct dp_soc *soc = pdev->soc;
1596 	uint8_t pdev_id = pdev->pdev_id;
1597 	uint32_t mac_for_pdev;
1598 	QDF_STATUS status;
1599 
1600 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1601 
1602 	/* Allocate sw rx descriptor pool for monitor status ring */
1603 	status = dp_rx_pdev_mon_status_desc_pool_alloc(pdev, mac_for_pdev);
1604 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1605 		dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed");
1606 		goto fail;
1607 	}
1608 
1609 	/* Allocate hw link desc bank for monitor mode for
1610 	 * SOFTUMAC architecture.
1611 	 */
1612 	status = dp_mon_hw_link_desc_bank_alloc(soc, mac_for_pdev);
1613 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1614 		dp_err("dp_mon_hw_link_desc_bank_alloc() failed");
1615 		goto mon_status_dealloc;
1616 	}
1617 
1618 	status = dp_rx_pdev_mon_dest_desc_pool_alloc(pdev, mac_for_pdev);
1619 	if (!QDF_IS_STATUS_SUCCESS(status))
1620 		goto link_desc_bank_free;
1621 
1622 	return status;
1623 
1624 link_desc_bank_free:
1625 	dp_mon_hw_link_desc_bank_free(soc, mac_for_pdev);
1626 mon_status_dealloc:
1627 	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
1628 fail:
1629 	return status;
1630 }
1631 
1632 QDF_STATUS
1633 dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev)
1634 {
1635 	QDF_STATUS status;
1636 	int mac_id, count;
1637 
1638 	for (mac_id = 0; mac_id < NUM_RXDMA_STATUS_RINGS_PER_PDEV; mac_id++) {
1639 		status = dp_rx_pdev_mon_cmn_desc_pool_alloc(pdev, mac_id);
1640 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1641 			dp_rx_mon_dest_err("%pK: desc pool alloc failed for mac_id %d",
1642 					   pdev->soc, mac_id);
1643 
1644 			for (count = 0; count < mac_id; count++)
1645 				dp_rx_pdev_mon_cmn_desc_pool_free(pdev, count);
1646 
1647 			return status;
1648 		}
1649 	}
1650 	return status;
1651 }
1652 
1653 #ifdef QCA_WIFI_MONITOR_MODE_NO_MSDU_START_TLV_SUPPORT
1654 static inline void
hal_rx_populate_buf_info(struct dp_soc * soc,struct hal_rx_mon_dest_buf_info * buf_info,void * rx_desc)1655 hal_rx_populate_buf_info(struct dp_soc *soc,
1656 			 struct hal_rx_mon_dest_buf_info *buf_info,
1657 			 void *rx_desc)
1658 {
1659 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
1660 				      (uint8_t *)buf_info,
1661 				      sizeof(*buf_info));
1662 }
1663 
1664 static inline uint8_t
hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc * soc,struct hal_rx_mon_dest_buf_info * buf_info,void * rx_desc,bool is_first_frag)1665 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc,
1666 				   struct hal_rx_mon_dest_buf_info *buf_info,
1667 				   void *rx_desc, bool is_first_frag)
1668 {
1669 	if (is_first_frag)
1670 		return buf_info->l2_hdr_pad;
1671 	else
1672 		return DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
1673 }
1674 #else
1675 static inline void
hal_rx_populate_buf_info(struct dp_soc * soc,struct hal_rx_mon_dest_buf_info * buf_info,void * rx_desc)1676 hal_rx_populate_buf_info(struct dp_soc *soc,
1677 			 struct hal_rx_mon_dest_buf_info *buf_info,
1678 			 void *rx_desc)
1679 {
1680 	if (hal_rx_tlv_decap_format_get(soc->hal_soc, rx_desc) ==
1681 	    HAL_HW_RX_DECAP_FORMAT_RAW)
1682 		buf_info->is_decap_raw = 1;
1683 
1684 	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc))
1685 		buf_info->mpdu_len_err = 1;
1686 }
1687 
1688 static inline uint8_t
hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc * soc,struct hal_rx_mon_dest_buf_info * buf_info,void * rx_desc,bool is_first_frag)1689 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc,
1690 				   struct hal_rx_mon_dest_buf_info *buf_info,
1691 				   void *rx_desc, bool is_first_frag)
1692 {
1693 	return hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_desc);
1694 }
1695 #endif
1696 
1697 static inline
dp_rx_msdus_set_payload(struct dp_soc * soc,qdf_nbuf_t msdu,uint8_t l2_hdr_offset)1698 void dp_rx_msdus_set_payload(struct dp_soc *soc, qdf_nbuf_t msdu,
1699 			     uint8_t l2_hdr_offset)
1700 {
1701 	uint8_t *data;
1702 	uint32_t rx_pkt_offset;
1703 
1704 	data = qdf_nbuf_data(msdu);
1705 	rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);
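	/*
	 * Illustrative example (sizes are assumptions): with a 128-byte RX
	 * packet TLV area and a 2-byte l2_hdr_offset, the pull below advances
	 * the data pointer by 130 bytes so that it lands on the first byte of
	 * the MSDU payload, past the RX TLVs and the L2 header padding.
	 */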
1706 	qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset);
1707 }
1708 
1709 static inline qdf_nbuf_t
dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc * soc,uint32_t mac_id,qdf_nbuf_t head_msdu,qdf_nbuf_t last_msdu,struct cdp_mon_status * rx_status)1710 dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc,
1711 				   uint32_t mac_id,
1712 				   qdf_nbuf_t head_msdu,
1713 				   qdf_nbuf_t last_msdu,
1714 				   struct cdp_mon_status *rx_status)
1715 {
1716 	qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list;
1717 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
1718 		mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
1719 		is_amsdu, is_first_frag, amsdu_pad;
1720 	void *rx_desc;
1721 	char *hdr_desc;
1722 	unsigned char *dest;
1723 	struct ieee80211_frame *wh;
1724 	struct ieee80211_qoscntl *qos;
1725 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1726 	struct dp_mon_pdev *mon_pdev;
1727 	struct hal_rx_mon_dest_buf_info buf_info;
1728 	uint8_t l2_hdr_offset;
1729 
1730 	head_frag_list = NULL;
1731 	mpdu_buf = NULL;
1732 
1733 	if (qdf_unlikely(!dp_pdev)) {
1734 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
1735 				     soc, mac_id);
1736 		return NULL;
1737 	}
1738 
1739 	mon_pdev = dp_pdev->monitor_pdev;
1740 
1741 	/* The nbuf has been pulled just beyond the status and points to the
1742 	 * payload
1743 	 */
1744 	if (!head_msdu)
1745 		goto mpdu_stitch_fail;
1746 
1747 	msdu_orig = head_msdu;
1748 
1749 	rx_desc = qdf_nbuf_data(msdu_orig);
1750 	qdf_mem_zero(&buf_info, sizeof(buf_info));
1751 	hal_rx_populate_buf_info(soc, &buf_info, rx_desc);
1752 
1753 	if (buf_info.mpdu_len_err) {
1754 		/* HW reported an MPDU length error; whether such frames */
1755 		/* should be dropped needs further investigation. */
1756 		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1757 		return NULL;
1758 	}
1759 
1760 	rx_desc = qdf_nbuf_data(last_msdu);
1761 
1762 	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
1763 								rx_desc);
1764 	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;
1765 
1766 	/* Fill out the rx_status from the PPDU start and end fields */
1767 	/*   HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */
1768 
1769 	rx_desc = qdf_nbuf_data(head_msdu);
1770 
1771 	/* Easy case - The MSDU status indicates that this is a non-decapped
1772 	 * packet in RAW mode.
1773 	 */
1774 	if (buf_info.is_decap_raw) {
1775 		/* Note that this path might suffer from headroom unavailability
1776 		 * - but the RX status is usually enough
1777 		 */
1778 
1779 		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
1780 								   &buf_info,
1781 								   rx_desc,
1782 								   true);
1783 		dp_rx_msdus_set_payload(soc, head_msdu, l2_hdr_offset);
1784 
1785 		dp_rx_mon_dest_debug("%pK: decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
1786 				     soc, head_msdu, head_msdu->next,
1787 				     last_msdu, last_msdu->next);
1788 
1789 		mpdu_buf = head_msdu;
1790 
1791 		prev_buf = mpdu_buf;
1792 
1793 		frag_list_sum_len = 0;
1794 		msdu = qdf_nbuf_next(head_msdu);
1795 		is_first_frag = 1;
1796 
1797 		while (msdu) {
1798 			l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(
1799 							soc, &buf_info,
1800 							rx_desc, false);
1801 			dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);
1802 
1803 			if (is_first_frag) {
1804 				is_first_frag = 0;
1805 				head_frag_list  = msdu;
1806 			}
1807 
1808 			frag_list_sum_len += qdf_nbuf_len(msdu);
1809 
1810 			/* Maintain the linking of the cloned MSDUs */
1811 			qdf_nbuf_set_next_ext(prev_buf, msdu);
1812 
1813 			/* Move to the next */
1814 			prev_buf = msdu;
1815 			msdu = qdf_nbuf_next(msdu);
1816 		}
1817 
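		/* The raw MPDU carries the FCS at its tail; trim it from the
		 * last fragment (HAL_RX_FCS_LEN is the 802.11 FCS length,
		 * normally 4 bytes).
		 */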
1818 		qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN);
1819 
1820 		/* If there were more fragments to this RAW frame */
1821 		if (head_frag_list) {
1822 			if (frag_list_sum_len <
1823 				sizeof(struct ieee80211_frame_min_one)) {
1824 				DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1825 				return NULL;
1826 			}
1827 			frag_list_sum_len -= HAL_RX_FCS_LEN;
1828 			qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
1829 						 frag_list_sum_len);
1830 			qdf_nbuf_set_next(mpdu_buf, NULL);
1831 		}
1832 
1833 		goto mpdu_stitch_done;
1834 	}
1835 
1836 	/* Decap mode:
1837 	 * Calculate the amount of header in decapped packet to knock off based
1838 	 * on the decap type and the corresponding number of raw bytes to copy
1839 	 * status header
1840 	 */
1841 	rx_desc = qdf_nbuf_data(head_msdu);
1842 
1843 	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
1844 
1845 	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);
1846 
1847 	/* Base size */
1848 	wifi_hdr_len = sizeof(struct ieee80211_frame);
1849 	wh = (struct ieee80211_frame *)hdr_desc;
1850 
1851 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
1852 
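	/* A 4-address (ToDS|FromDS) frame carries a fourth 6-byte address,
	 * hence the extra 6 bytes of 802.11 header.
	 */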
1853 	if (dir == IEEE80211_FC1_DIR_DSTODS)
1854 		wifi_hdr_len += 6;
1855 
1856 	is_amsdu = 0;
1857 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
1858 		qos = (struct ieee80211_qoscntl *)
1859 			(hdr_desc + wifi_hdr_len);
1860 		wifi_hdr_len += 2;
1861 
1862 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
1863 	}
1864 
1865 	/* Calculate security header length based on 'Protected'
1866 	 * and 'EXT_IV' flags
1867 	 */
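	/* Illustrative mapping (assumed from 802.11 conventions): an IV with
	 * EXT_IV set corresponds to an 8-byte CCMP/TKIP-style security
	 * header, otherwise a 4-byte WEP IV is assumed.
	 */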
1868 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1869 		char *iv = (char *)wh + wifi_hdr_len;
1870 
1871 		if (iv[3] & KEY_EXTIV)
1872 			sec_hdr_len = 8;
1873 		else
1874 			sec_hdr_len = 4;
1875 	} else {
1876 		sec_hdr_len = 0;
1877 	}
1878 	wifi_hdr_len += sec_hdr_len;
1879 
1880 	/* MSDU related stuff LLC - AMSDU subframe header etc */
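	/* Assumed breakdown of the constants below: 14 bytes for the A-MSDU
	 * subframe header (DA + SA + length) and 8 bytes for the LLC/SNAP
	 * header.
	 */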
1881 	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
1882 
1883 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
1884 
1885 	/* "Decap" header to remove from MSDU buffer */
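	/* The 14 bytes assumed here correspond to the decapped Ethernet II
	 * header: 6-byte DA + 6-byte SA + 2-byte EtherType.
	 */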
1886 	decap_hdr_pull_bytes = 14;
1887 
1888 	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
1889 	 * status of the now decapped first msdu. Leave enough headroom for
1890 	 * accommodating any radiotap/prism-like PHY header
1891 	 */
1892 	mpdu_buf = qdf_nbuf_alloc(soc->osdev,
1893 				  MAX_MONITOR_HEADER + mpdu_buf_len,
1894 				  MAX_MONITOR_HEADER, 4, FALSE);
1895 
1896 	if (!mpdu_buf)
1897 		goto mpdu_stitch_done;
1898 
1899 	/* Copy the MPDU related header and enc headers into the first buffer
1900 	 * - Note that there can be a 2 byte pad between header and enc header
1901 	 */
1902 
1903 	prev_buf = mpdu_buf;
1904 	dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
1905 	if (!dest)
1906 		goto mpdu_stitch_fail;
1907 
1908 	qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
1909 	hdr_desc += wifi_hdr_len;
1910 
1911 #if 0
1912 	dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len);
1913 	adf_os_mem_copy(dest, hdr_desc, sec_hdr_len);
1914 	hdr_desc += sec_hdr_len;
1915 #endif
1916 
1917 	/* The first LLC len is copied into the MPDU buffer */
1918 	frag_list_sum_len = 0;
1919 
1920 	msdu_orig = head_msdu;
1921 	is_first_frag = 1;
1922 	amsdu_pad = 0;
1923 
1924 	while (msdu_orig) {
1925 
1926 		/* TODO: intra AMSDU padding - do we need it ??? */
1927 
1928 		msdu = msdu_orig;
1929 
1930 		if (is_first_frag) {
1931 			head_frag_list  = msdu;
1932 		} else {
1933 			/* Reload the hdr ptr only on non-first MSDUs */
1934 			rx_desc = qdf_nbuf_data(msdu_orig);
1935 			hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc,
1936 							     rx_desc);
1937 		}
1938 
1939 		/* Copy this buffer's MSDU related status into the prev buffer */
1940 
1941 		if (is_first_frag)
1942 			is_first_frag = 0;
1943 
1944 		/* Update protocol and flow tag for MSDU */
1945 		dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev,
1946 						   msdu_orig, rx_desc);
1947 
1948 		dest = qdf_nbuf_put_tail(prev_buf,
1949 					 msdu_llc_len + amsdu_pad);
1950 
1951 		if (!dest)
1952 			goto mpdu_stitch_fail;
1953 
1954 		dest += amsdu_pad;
1955 		qdf_mem_copy(dest, hdr_desc, msdu_llc_len);
1956 
1957 		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
1958 								   &buf_info,
1959 								   rx_desc,
1960 								   true);
1961 		dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);
1962 
1963 		/* Push the MSDU buffer beyond the decap header */
1964 		qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
1965 		frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu)
1966 			+ amsdu_pad;
1967 
1968 		/* Set up intra-AMSDU pad to be added to start of next buffer -
1969 		 * A-MSDU subframes are padded to a 4-byte boundary
1970 		 */
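		/* Worked example: if msdu_llc_len + qdf_nbuf_len(msdu) is 61,
		 * then 61 & 0x3 = 1 and amsdu_pad = 4 - 1 = 3, i.e. 3 pad
		 * bytes are placed ahead of the next MSDU's LLC header.
		 */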
1971 		amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
1972 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
1973 
1974 		/* TODO FIXME How do we handle MSDUs that have fraglist - Should
1975 		 * probably iterate all the frags cloning them along the way
1976 		 * and also updating the prev_buf pointer
1977 		 */
1978 
1979 		/* Move to the next */
1980 		prev_buf = msdu;
1981 		msdu_orig = qdf_nbuf_next(msdu_orig);
1982 	}
1983 
1984 #if 0
1985 	/* Add in the trailer section - encryption trailer + FCS */
1986 	qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN);
1987 	frag_list_sum_len += HAL_RX_FCS_LEN;
1988 #endif
1989 
1990 	frag_list_sum_len -= msdu_llc_len;
1991 
1992 	/* TODO: Convert this to suitable adf routines */
1993 	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
1994 				 frag_list_sum_len);
1995 
1996 	dp_rx_mon_dest_debug("%pK: mpdu_buf %pK mpdu_buf->len %u",
1997 			     soc, mpdu_buf, mpdu_buf->len);
1998 
1999 mpdu_stitch_done:
2000 	/* Check if this buffer contains the PPDU end status for TSF */
2001 	/* Need to revisit this code to see where we can get the TSF timestamp */
2002 #if 0
2003 	/* PPDU end TLV will be retrieved from monitor status ring */
2004 	last_mpdu =
2005 		(*(((u_int32_t *)&rx_desc->attention)) &
2006 		RX_ATTENTION_0_LAST_MPDU_MASK) >>
2007 		RX_ATTENTION_0_LAST_MPDU_LSB;
2008 
2009 	if (last_mpdu)
2010 		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
2011 
2012 #endif
2013 	return mpdu_buf;
2014 
2015 mpdu_stitch_fail:
2016 	if ((mpdu_buf) && !buf_info.is_decap_raw) {
2017 		dp_rx_mon_dest_err("%pK: mpdu_stitch_fail mpdu_buf %pK",
2018 				   soc, mpdu_buf);
2019 		/* Free the head buffer */
2020 		qdf_nbuf_free(mpdu_buf);
2021 	}
2022 	return NULL;
2023 }
2024 
2025 #ifdef DP_RX_MON_MEM_FRAG
2026 /**
2027  * dp_rx_mon_fraglist_prepare() - Prepare nbuf fraglist from chained skb
2028  *
2029  * @head_msdu: Parent SKB
2030  * @tail_msdu: Last skb in the chained list
2031  *
2032  * Return: Void
2033  */
dp_rx_mon_fraglist_prepare(qdf_nbuf_t head_msdu,qdf_nbuf_t tail_msdu)2034 void dp_rx_mon_fraglist_prepare(qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
2035 {
2036 	qdf_nbuf_t msdu, mpdu_buf, head_frag_list;
2037 	uint32_t frag_list_sum_len;
2038 
2039 	dp_err("[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
2040 	       __func__, __LINE__, head_msdu, head_msdu->next,
2041 	       tail_msdu, tail_msdu->next);
2042 
2043 	/* Single skb accommodating an MPDU worth of data */
2044 	if (tail_msdu == head_msdu)
2045 		return;
2046 
2047 	mpdu_buf = head_msdu;
2048 	frag_list_sum_len = 0;
2049 
2050 	msdu = qdf_nbuf_next(head_msdu);
2051 	/* msdu can't be NULL here since this is the multiple-skb case */
2052 
2053 	/* Head frag list to point to second skb */
2054 	head_frag_list  = msdu;
2055 
2056 	while (msdu) {
2057 		frag_list_sum_len += qdf_nbuf_len(msdu);
2058 		msdu = qdf_nbuf_next(msdu);
2059 	}
2060 
2061 	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, frag_list_sum_len);
2062 
2063 	/* Make Parent skb next to NULL */
2064 	qdf_nbuf_set_next(mpdu_buf, NULL);
2065 }
2066 
2067 /**
2068  * dp_rx_mon_frag_restitch_mpdu_from_msdus() - Restitch logic to
2069  *      convert to an 802.3 header and adjust the frag memory to point to
2070  *      the dot3 header and payload in case of a non-raw frame.
2071  *
2072  * @soc: struct dp_soc *
2073  * @mac_id: MAC id
2074  * @head_msdu: MPDU containing all MSDU as a frag
2075  * @tail_msdu: last skb which accommodate MPDU info
2076  * @rx_status: struct cdp_mon_status *
2077  *
2078  * Return: Adjusted nbuf containing MPDU worth info.
2079  */
2080 static inline qdf_nbuf_t
dp_rx_mon_frag_restitch_mpdu_from_msdus(struct dp_soc * soc,uint32_t mac_id,qdf_nbuf_t head_msdu,qdf_nbuf_t tail_msdu,struct cdp_mon_status * rx_status)2081 dp_rx_mon_frag_restitch_mpdu_from_msdus(struct dp_soc *soc,
2082 					uint32_t mac_id,
2083 					qdf_nbuf_t head_msdu,
2084 					qdf_nbuf_t tail_msdu,
2085 					struct cdp_mon_status *rx_status)
2086 {
2087 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
2088 		mpdu_buf_len, decap_hdr_pull_bytes, dir,
2089 		is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
2090 	qdf_frag_t rx_desc, rx_src_desc, rx_dest_desc, frag_addr;
2091 	char *hdr_desc;
2092 	uint8_t num_frags, frags_iter, l2_hdr_offset;
2093 	struct ieee80211_frame *wh;
2094 	struct ieee80211_qoscntl *qos;
2095 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2096 	int16_t frag_page_offset = 0;
2097 	struct hal_rx_mon_dest_buf_info buf_info;
2098 	uint32_t pad_byte_pholder = 0;
2099 	qdf_nbuf_t msdu_curr;
2100 	uint16_t rx_mon_tlv_size = soc->rx_mon_pkt_tlv_size;
2101 	struct dp_mon_pdev *mon_pdev;
2102 
2103 	if (qdf_unlikely(!dp_pdev)) {
2104 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
2105 				     soc, mac_id);
2106 		return NULL;
2107 	}
2108 
2109 	mon_pdev = dp_pdev->monitor_pdev;
2110 	qdf_mem_zero(&buf_info, sizeof(struct hal_rx_mon_dest_buf_info));
2111 
2112 	if (!head_msdu || !tail_msdu)
2113 		goto mpdu_stitch_fail;
2114 
2115 	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;
2116 
2117 	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc)) {
2118 		/* HW reported an MPDU length error; whether such frames */
2119 		/* should be dropped needs further investigation. */
2120 		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
2121 		return NULL;
2122 	}
2123 
2124 	/* Look for FCS error */
2125 	num_frags = qdf_nbuf_get_nr_frags(tail_msdu);
2126 	rx_desc = qdf_nbuf_get_frag_addr(tail_msdu, num_frags - 1) -
2127 				rx_mon_tlv_size;
2128 	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
2129 								rx_desc);
2130 	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;
2131 
2132 	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;
2133 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
2134 				      (uint8_t *)&buf_info,
2135 				      sizeof(buf_info));
2136 
2137 	/* Easy case - The MSDU status indicates that this is a non-decapped
2138 	 * packet in RAW mode.
2139 	 */
2140 	if (buf_info.is_decap_raw == 1) {
2141 		if (qdf_unlikely(mon_pdev->ppdu_info.rx_status.rs_fcs_err)) {
2142 			hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
2143 			wh = (struct ieee80211_frame *)hdr_desc;
2144 			if ((wh->i_fc[0] & QDF_IEEE80211_FC0_VERSION_MASK) !=
2145 			    QDF_IEEE80211_FC0_VERSION_0) {
2146 				DP_STATS_INC(dp_pdev, dropped.mon_ver_err, 1);
2147 				return NULL;
2148 			}
2149 		}
2150 		dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu);
2151 		goto mpdu_stitch_done;
2152 	}
2153 
2154 	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
2155 
2156 	/* Decap mode:
2157 	 * Calculate the amount of header in decapped packet to knock off based
2158 	 * on the decap type and the corresponding number of raw bytes to copy
2159 	 * status header
2160 	 */
2161 	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
2162 
2163 	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);
2164 
2165 	/* Base size */
2166 	wifi_hdr_len = sizeof(struct ieee80211_frame);
2167 	wh = (struct ieee80211_frame *)hdr_desc;
2168 
2169 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
2170 
2171 	if (dir == IEEE80211_FC1_DIR_DSTODS)
2172 		wifi_hdr_len += 6;
2173 
2174 	is_amsdu = 0;
2175 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
2176 		qos = (struct ieee80211_qoscntl *)
2177 			(hdr_desc + wifi_hdr_len);
2178 		wifi_hdr_len += 2;
2179 
2180 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
2181 	}
2182 
2183 	/* Calculate security header length based on 'Protected'
2184 	 * and 'EXT_IV' flags
2185 	 */
2186 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2187 		char *iv = (char *)wh + wifi_hdr_len;
2188 
2189 		if (iv[3] & KEY_EXTIV)
2190 			sec_hdr_len = 8;
2191 		else
2192 			sec_hdr_len = 4;
2193 	} else {
2194 		sec_hdr_len = 0;
2195 	}
2196 	wifi_hdr_len += sec_hdr_len;
2197 
2198 	/* MSDU related stuff LLC - AMSDU subframe header etc */
2199 	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
2200 
2201 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
2202 
2203 	/* "Decap" header to remove from MSDU buffer */
2204 	decap_hdr_pull_bytes = 14;
2205 
2206 	amsdu_pad = 0;
2207 	tot_msdu_len = 0;
2208 
2209 	/*
2210 	 * Keep first-MSDU handling outside of the loop to avoid
2211 	 * repeated per-iteration checks
2212 	 */
2213 
2214 	/* Construct src header */
2215 	rx_src_desc = hdr_desc;
2216 
2217 	/*
2218 	 * Update protocol and flow tag for MSDU
2219 	 * update frag index in ctx_idx field.
2220 	 * Reset head pointer data of nbuf before updating.
2221 	 */
2222 	QDF_NBUF_CB_RX_CTX_ID(head_msdu) = 0;
2223 	dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, head_msdu, rx_desc);
2224 
2225 	/* Construct destination address */
2226 	frag_addr = qdf_nbuf_get_frag_addr(head_msdu, 0);
2227 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);
2228 	/* We will come here in 2 scenarios:
2229 	 * 1. First MSDU of MPDU with single buffer
2230 	 * 2. First buffer of First MSDU of MPDU with continuation
2231 	 *
2232 	 *  ------------------------------------------------------------
2233 	 * | SINGLE BUFFER (<= RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN)|
2234 	 *  ------------------------------------------------------------
2235 	 *
2236 	 *  ------------------------------------------------------------
2237 	 * | First BUFFER with Continuation             | ...           |
2238 	 * | (RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN) |               |
2239 	 *  ------------------------------------------------------------
2240 	 */
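	/* Room left in this RX buffer beyond the fragment's payload; it can
	 * later absorb the A-MSDU pad bytes needed for 4-byte alignment.
	 */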
2241 	pad_byte_pholder =
2242 		(RX_MONITOR_BUFFER_SIZE - soc->rx_mon_pkt_tlv_size) - frag_size;
2243 	/* Construct destination address
2244 	 *  --------------------------------------------------------------
2245 	 * | RX_PKT_TLV | L2_HDR_PAD   |   Decap HDR   |      Payload     |
2246 	 * |            |                              /                  |
2247 	 * |            >Frag address points here     /                   |
2248 	 * |            \                            /                    |
2249 	 * |             \ These bytes need to be   /                     |
2250 	 * |              \  removed to frame pkt  /                      |
2251 	 * |               -----------------------                        |
2252 	 * |                                      |                       |
2253 	 * |                                      |                       |
2254 	 * |   WIFI +LLC HDR will be added here <-|                       |
2255 	 * |        |                             |                       |
2256 	 * |         >Dest addr will point        |                       |
2257 	 * |            somewhere in this area    |                       |
2258 	 *  --------------------------------------------------------------
2259 	 */
2260 	rx_dest_desc =
2261 		(frag_addr + decap_hdr_pull_bytes + l2_hdr_offset) -
2262 					mpdu_buf_len;
2263 	/* Add WIFI and LLC header for 1st MSDU of MPDU */
2264 	qdf_mem_copy(rx_dest_desc, rx_src_desc, mpdu_buf_len);
2265 
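	/* Worked example (header sizes are illustrative): with a 26-byte
	 * 802.11 header plus an 8-byte LLC (mpdu_buf_len = 34), a 14-byte
	 * decap header and a 2-byte l2_hdr_offset, frag_page_offset is
	 * (14 + 2) - 34 = -18, i.e. the frag start moves 18 bytes back so it
	 * exposes the WIFI + LLC header copied in above.
	 */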
2266 	frag_page_offset =
2267 		(decap_hdr_pull_bytes + l2_hdr_offset) - mpdu_buf_len;
2268 
2269 	qdf_nbuf_move_frag_page_offset(head_msdu, 0, frag_page_offset);
2270 
2271 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);
2272 
2273 	if (buf_info.first_buffer && buf_info.last_buffer) {
2274 		/* MSDU with single buffer */
2275 		amsdu_pad = frag_size & 0x3;
2276 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
2277 		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
2278 			char *frag_addr_temp;
2279 
2280 			qdf_nbuf_trim_add_frag_size(head_msdu, 0, amsdu_pad,
2281 						    0);
2282 			frag_addr_temp =
2283 				(char *)qdf_nbuf_get_frag_addr(head_msdu, 0);
2284 			frag_addr_temp = (frag_addr_temp +
2285 				qdf_nbuf_get_frag_size_by_idx(head_msdu, 0)) -
2286 					amsdu_pad;
2287 			qdf_mem_zero(frag_addr_temp, amsdu_pad);
2288 			amsdu_pad = 0;
2289 		}
2290 	} else {
2291 		/*
2292 		 * First buffer of a continuation frame, hence no
2293 		 * amsdu_padding needs to be added here.
2294 		 * Track tot_msdu_len so that the amsdu_pad byte
2295 		 * is calculated for the last buffer of the MSDU.
2296 		 */
2297 		tot_msdu_len = frag_size;
2298 		amsdu_pad = 0;
2299 	}
2300 
2301 	/* At this point amsdu_pad may be non-zero if the first buffer was a
2302 	 * single-buffer MSDU and did not have a placeholder to absorb the
2303 	 * amsdu padding bytes at its end.
2304 	 * So don't initialize it to zero here.
2305 	 */
2306 	pad_byte_pholder = 0;
2307 	for (msdu_curr = head_msdu; msdu_curr;) {
2308 		/* frags_iter starts from 0 only for the second skb onwards */
2309 		if (msdu_curr == head_msdu)
2310 			frags_iter = 1;
2311 		else
2312 			frags_iter = 0;
2313 
2314 		num_frags = qdf_nbuf_get_nr_frags(msdu_curr);
2315 
2316 		for (; frags_iter < num_frags; frags_iter++) {
2317 		/* Construct destination address
2318 		 *  ----------------------------------------------------------
2319 		 * | RX_PKT_TLV | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
2320 		 * |            | (First buffer)             |         |      |
2321 		 * |            |                            /        /       |
2322 		 * |            >Frag address points here   /        /        |
2323 		 * |            \                          /        /         |
2324 		 * |             \ These bytes need to be /        /          |
2325 		 * |              \  removed to frame pkt/        /           |
2326 		 * |               ----------------------        /            |
2327 		 * |                                     |     /     Add      |
2328 		 * |                                     |    /   amsdu pad   |
2329 		 * |   LLC HDR will be added here      <-|    |   Byte for    |
2330 		 * |        |                            |    |   last frame  |
2331 		 * |         >Dest addr will point       |    |    if space   |
2332 		 * |            somewhere in this area   |    |    available  |
2333 		 * |  And amsdu_pad will be created if   |    |               |
2334 		 * | didn't get added in last buffer     |    |               |
2335 		 * |       (First Buffer)                |    |               |
2336 		 *  ----------------------------------------------------------
2337 		 */
2338 			frag_addr =
2339 				qdf_nbuf_get_frag_addr(msdu_curr, frags_iter);
2340 			rx_desc = frag_addr - rx_mon_tlv_size;
2341 
2342 			/*
2343 			 * Update protocol and flow tag for MSDU
2344 			 * update frag index in ctx_idx field
2345 			 */
2346 			QDF_NBUF_CB_RX_CTX_ID(msdu_curr) = frags_iter;
2347 			dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev,
2348 							   msdu_curr, rx_desc);
2349 
2350 			/* Read buffer info from stored data in tlvs */
2351 			hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
2352 						      (uint8_t *)&buf_info,
2353 						      sizeof(buf_info));
2354 
2355 			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_curr,
2356 								  frags_iter);
2357 
2358 			/* If middle buffer, don't add any header */
2359 			if ((!buf_info.first_buffer) &&
2360 			    (!buf_info.last_buffer)) {
2361 				tot_msdu_len += frag_size;
2362 				amsdu_pad = 0;
2363 				pad_byte_pholder = 0;
2364 				continue;
2365 			}
2366 
2367 			/* Calculate if current buffer has placeholder
2368 			 * to accommodate amsdu pad byte
2369 			 */
2370 			pad_byte_pholder =
2371 				(RX_MONITOR_BUFFER_SIZE - soc->rx_mon_pkt_tlv_size)
2372 				- frag_size;
2373 			/*
2374 			 * We will come here only under three conditions:
2375 			 * 1. MSDU with a single buffer
2376 			 * 2. First buffer, in case the MSDU is spread across
2377 			 *    multiple buffers
2378 			 * 3. Last buffer, in case the MSDU is spread across
2379 			 *    multiple buffers
2380 			 *
2381 			 *         First buffer | Last buffer
2382 			 * Case 1:      1       |     1
2383 			 * Case 2:      1       |     0
2384 			 * Case 3:      0       |     1
2385 			 *
2386 			 * Only in the 3rd case is the l2_hdr_padding zero;
2387 			 * in the other cases it is 2 bytes.
2388 			 */
2389 			if (buf_info.first_buffer)
2390 				l2_hdr_offset =
2391 					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
2392 			else
2393 				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
2394 
2395 			if (buf_info.first_buffer) {
2396 				/* Src addr from where llc header needs to be copied */
2397 				rx_src_desc =
2398 					hal_rx_desc_get_80211_hdr(soc->hal_soc,
2399 								  rx_desc);
2400 
2401 				/* Size of buffer with llc header */
2402 				frag_size = frag_size -
2403 					(l2_hdr_offset + decap_hdr_pull_bytes);
2404 				frag_size += msdu_llc_len;
2405 
2406 				/* Construct destination address */
2407 				rx_dest_desc = frag_addr +
2408 					decap_hdr_pull_bytes + l2_hdr_offset;
2409 				rx_dest_desc = rx_dest_desc - (msdu_llc_len);
2410 
2411 				qdf_mem_copy(rx_dest_desc, rx_src_desc,
2412 					     msdu_llc_len);
2413 
2414 				/*
2415 				 * Calculate new page offset and create hole
2416 				 * if amsdu_pad required.
2417 				 */
2418 				frag_page_offset = l2_hdr_offset +
2419 						decap_hdr_pull_bytes;
2420 				frag_page_offset = frag_page_offset -
2421 						(msdu_llc_len + amsdu_pad);
2422 
2423 				qdf_nbuf_move_frag_page_offset(msdu_curr,
2424 							       frags_iter,
2425 							       frag_page_offset);
2426 
2427 				tot_msdu_len = frag_size;
2428 				/*
2429 				 * No amsdu padding required for first frame of
2430 				 * continuation buffer
2431 				 */
2432 				if (!buf_info.last_buffer) {
2433 					amsdu_pad = 0;
2434 					continue;
2435 				}
2436 			} else {
2437 				tot_msdu_len += frag_size;
2438 			}
2439 
2440 			/* We reach this point in only two cases:
2441 			 * 1. Single buffer MSDU
2442 			 * 2. Last buffer of an MSDU spread across multiple buffers
2443 			 */
2444 
2445 			/* Check size of buffer if amsdu padding required */
2446 			amsdu_pad = tot_msdu_len & 0x3;
2447 			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
2448 
2449 			/* Create placeholder if current buffer can
2450 			 * accommodate padding.
2451 			 */
2452 			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
2453 				char *frag_addr_temp;
2454 
2455 				qdf_nbuf_trim_add_frag_size(msdu_curr,
2456 							    frags_iter,
2457 							    amsdu_pad, 0);
2458 				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_curr,
2459 										frags_iter);
2460 				frag_addr_temp = (frag_addr_temp +
2461 					qdf_nbuf_get_frag_size_by_idx(msdu_curr, frags_iter)) -
2462 					amsdu_pad;
2463 				qdf_mem_zero(frag_addr_temp, amsdu_pad);
2464 				amsdu_pad = 0;
2465 			}
2466 
2467 			/* reset tot_msdu_len */
2468 			tot_msdu_len = 0;
2469 		}
2470 		msdu_curr = qdf_nbuf_next(msdu_curr);
2471 	}
2472 
2473 	dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu);
2474 
2475 	dp_rx_mon_dest_debug("%pK: head_msdu %pK head_msdu->len %u",
2476 			     soc, head_msdu, head_msdu->len);
2477 
2478 mpdu_stitch_done:
2479 	return head_msdu;
2480 
2481 mpdu_stitch_fail:
2482 	dp_rx_mon_dest_err("%pK: mpdu_stitch_fail head_msdu %pK",
2483 			   soc, head_msdu);
2484 	return NULL;
2485 }
2486 #endif
2487 
2488 #ifdef DP_RX_MON_MEM_FRAG
dp_rx_mon_restitch_mpdu(struct dp_soc * soc,uint32_t mac_id,qdf_nbuf_t head_msdu,qdf_nbuf_t tail_msdu,struct cdp_mon_status * rs)2489 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id,
2490 				   qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu,
2491 				   struct cdp_mon_status *rs)
2492 {
2493 	if (qdf_nbuf_get_nr_frags(head_msdu))
2494 		return dp_rx_mon_frag_restitch_mpdu_from_msdus(soc, mac_id,
2495 							       head_msdu,
2496 							       tail_msdu, rs);
2497 	else
2498 		return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id,
2499 							  head_msdu,
2500 							  tail_msdu, rs);
2501 }
2502 #else
dp_rx_mon_restitch_mpdu(struct dp_soc * soc,uint32_t mac_id,qdf_nbuf_t head_msdu,qdf_nbuf_t tail_msdu,struct cdp_mon_status * rs)2503 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id,
2504 				   qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu,
2505 				   struct cdp_mon_status *rs)
2506 {
2507 	return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu,
2508 						  tail_msdu, rs);
2509 }
2510 #endif
2511 
2512 #ifdef DP_RX_MON_MEM_FRAG
2513 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
2514 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
dp_rx_mon_update_pf_tag_to_buf_headroom(struct dp_soc * soc,qdf_nbuf_t nbuf)2515 void dp_rx_mon_update_pf_tag_to_buf_headroom(struct dp_soc *soc,
2516 					     qdf_nbuf_t nbuf)
2517 {
2518 	qdf_nbuf_t ext_list;
2519 
2520 	if (qdf_unlikely(!soc)) {
2521 		dp_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
2522 		       soc);
2523 		qdf_assert_always(0);
2524 	}
2525 
2526 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
2527 		return;
2528 
2529 	if (qdf_unlikely(!nbuf))
2530 		return;
2531 
2532 	/* Return if it didn't come from the monitor path */
2533 	if (!qdf_nbuf_get_nr_frags(nbuf))
2534 		return;
2535 
2536 	/* Headroom must be double the PF tag size as it is first copied to the head */
2537 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) {
2538 		dp_err("Nbuf avail Headroom[%d] < 2 * DP_RX_MON_TOT_PF_TAG_LEN[%lu]",
2539 		       qdf_nbuf_headroom(nbuf), DP_RX_MON_TOT_PF_TAG_LEN);
2540 		return;
2541 	}
2542 
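	/* Stash the tag bytes in the headroom just above the data pointer:
	 * push the head, copy DP_RX_MON_TOT_PF_TAG_LEN bytes from the skb
	 * head, then pull back so the visible frame contents are unchanged.
	 */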
2543 	qdf_nbuf_push_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN);
2544 	qdf_mem_copy(qdf_nbuf_data(nbuf), qdf_nbuf_head(nbuf),
2545 		     DP_RX_MON_TOT_PF_TAG_LEN);
2546 	qdf_nbuf_pull_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN);
2547 
2548 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2549 	while (ext_list) {
2550 		/* Headroom must be double the PF tag size
2551 		 * as it is first copied to the head
2552 		 */
2553 		if (qdf_unlikely(qdf_nbuf_headroom(ext_list) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) {
2554 			dp_err("Fraglist Nbuf avail Headroom[%d] < 2 * DP_RX_MON_TOT_PF_TAG_LEN[%lu]",
2555 			       qdf_nbuf_headroom(ext_list),
2556 			       DP_RX_MON_TOT_PF_TAG_LEN);
2557 			ext_list = qdf_nbuf_queue_next(ext_list);
2558 			continue;
2559 		}
2560 		qdf_nbuf_push_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN);
2561 		qdf_mem_copy(qdf_nbuf_data(ext_list), qdf_nbuf_head(ext_list),
2562 			     DP_RX_MON_TOT_PF_TAG_LEN);
2563 		qdf_nbuf_pull_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN);
2564 		ext_list = qdf_nbuf_queue_next(ext_list);
2565 	}
2566 }
2567 #endif
2568 #endif
2569 
2570 #ifdef QCA_MONITOR_PKT_SUPPORT
dp_mon_htt_dest_srng_setup(struct dp_soc * soc,struct dp_pdev * pdev,int mac_id,int mac_for_pdev)2571 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
2572 				      struct dp_pdev *pdev,
2573 				      int mac_id,
2574 				      int mac_for_pdev)
2575 {
2576 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2577 
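	/* With rxdma1 enabled, monitor destination mode uses three SRNGs per
	 * LMAC (MONITOR_BUF, MONITOR_DST and MONITOR_DESC); each is
	 * registered with the target via an HTT SRNG setup message.
	 */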
2578 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2579 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2580 					soc->rxdma_mon_buf_ring[mac_id]
2581 					.hal_srng,
2582 					RXDMA_MONITOR_BUF);
2583 
2584 		if (status != QDF_STATUS_SUCCESS) {
2585 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon buf ring");
2586 			return status;
2587 		}
2588 
2589 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2590 					soc->rxdma_mon_dst_ring[mac_id]
2591 					.hal_srng,
2592 					RXDMA_MONITOR_DST);
2593 
2594 		if (status != QDF_STATUS_SUCCESS) {
2595 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon dst ring");
2596 			return status;
2597 		}
2598 
2599 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2600 					soc->rxdma_mon_desc_ring[mac_id]
2601 					.hal_srng,
2602 					RXDMA_MONITOR_DESC);
2603 
2604 		if (status != QDF_STATUS_SUCCESS) {
2605 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon desc ring");
2606 			return status;
2607 		}
2608 	}
2609 
2610 	return status;
2611 }
2612 #endif /* QCA_MONITOR_PKT_SUPPORT */
2613 
2614 #ifdef QCA_MONITOR_PKT_SUPPORT
dp_mon_dest_rings_deinit(struct dp_pdev * pdev,int lmac_id)2615 void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
2616 {
2617 	struct dp_soc *soc = pdev->soc;
2618 
2619 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2620 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2621 			       RXDMA_MONITOR_BUF, 0);
2622 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2623 			       RXDMA_MONITOR_DST, 0);
2624 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2625 			       RXDMA_MONITOR_DESC, 0);
2626 	}
2627 }
2628 
dp_mon_dest_rings_free(struct dp_pdev * pdev,int lmac_id)2629 void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
2630 {
2631 	struct dp_soc *soc = pdev->soc;
2632 
2633 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2634 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
2635 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
2636 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
2637 	}
2638 }
2639 
dp_mon_dest_rings_init(struct dp_pdev * pdev,int lmac_id)2640 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
2641 {
2642 	struct dp_soc *soc = pdev->soc;
2643 
2644 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2645 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2646 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
2647 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc);
2648 			goto fail1;
2649 		}
2650 
2651 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2652 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
2653 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
2654 			goto fail1;
2655 		}
2656 
2657 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2658 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
2659 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
2660 			goto fail1;
2661 		}
2662 	}
2663 	return QDF_STATUS_SUCCESS;
2664 
2665 fail1:
2666 	return QDF_STATUS_E_NOMEM;
2667 }
2668 
dp_mon_dest_rings_alloc(struct dp_pdev * pdev,int lmac_id)2669 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
2670 {
2671 	int entries;
2672 	struct dp_soc *soc = pdev->soc;
2673 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2674 
2675 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2676 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2677 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2678 				  RXDMA_MONITOR_BUF, entries, 0)) {
2679 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc);
2680 			goto fail1;
2681 		}
2682 		entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
2683 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2684 				  RXDMA_MONITOR_DST, entries, 0)) {
2685 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
2686 			goto fail1;
2687 		}
2688 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2689 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2690 				  RXDMA_MONITOR_DESC, entries, 0)) {
2691 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
2692 			goto fail1;
2693 		}
2694 	}
2695 	return QDF_STATUS_SUCCESS;
2696 
2697 fail1:
2698 	return QDF_STATUS_E_NOMEM;
2699 }
2700 #endif
2701