/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_RX_MON_1_0_H_
#define _DP_RX_MON_1_0_H_

#include <dp_rx.h>
/*
 * MON_BUF_MIN_ENTRIES defines the minimum number of network buffers to be
 * refilled into the RXDMA monitor buffer ring at init time; the remaining
 * buffers are replenished when the monitor vap is created.
 */
#define MON_BUF_MIN_ENTRIES 64

/*
 * MON_DROP_REAP_LIMIT defines the maximum number of ring entries that are
 * processed in a single pass of each non-monitoring RXDMA2SW ring.
 */
#define MON_DROP_REAP_LIMIT 64

QDF_STATUS dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev,
					       uint32_t mac_id);
QDF_STATUS dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev,
						 uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev,
					  uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev,
					    uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev,
					  uint32_t mac_id);
void dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);

QDF_STATUS dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev);

/**
 * dp_rx_mon_dest_process() - Brain of the Rx processing functionality
 *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac id
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: none
 */
#ifdef QCA_MONITOR_PKT_SUPPORT
void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota);
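
/*
 * Illustrative usage (not part of this header; lmac_id and budget are
 * hypothetical caller-side names): the destination ring reap is typically
 * driven from the DP interrupt bottom half, once per LMAC, with the
 * remaining budget for that context, e.g.
 *
 *	dp_rx_mon_dest_process(soc, int_ctx, lmac_id, budget);
 *
 * Only non-error frames are expected here; error frames are handled
 * separately via dp_rxdma_err_process().
 */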

void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
				 bool delayed_replenish);
QDF_STATUS
dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id);
void
dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id);
#else
static inline
void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			    uint32_t mac_id, uint32_t quota)
{
}

static inline
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
}

static inline QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
				 bool delayed_replenish)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS
dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
}
#endif

#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_dest_srng_drop_for_mac() - Drop the mon dest ring packets for
 *  a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 * @force_flush: Force flush ring
 *
 * Return: Number of dropped ring entries
 */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
			      bool force_flush);
#else
#ifdef QCA_SUPPORT_FULL_MON
/**
 * dp_mon_dest_srng_drop_for_mac() - Drop the mon dest ring packets for
 *  a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 *
 * Return: Number of dropped ring entries
 */
uint32_t
dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id);
#endif
#endif

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			      uint32_t mac_id, uint32_t quota);

/**
 * dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf
 * @pdev: DP pdev object
 *
 * Return: None
 */
void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev);

#ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_rx_mon_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @dp_pdev: core txrx pdev context
 * @buf_addr_info: void pointer to monitor link descriptor buf addr info
 * @mac_id: mac id which is one of 3 mac_ids
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
			   hal_buff_addrinfo_t buf_addr_info,
			   int mac_id);
#else
static inline QDF_STATUS
dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
			   hal_buff_addrinfo_t buf_addr_info,
			   int mac_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline uint16_t dp_rx_mon_get_rx_pkt_tlv_size(struct dp_soc *soc)
{
	return soc->curr_rx_pkt_tlv_size;
}
#else
static inline uint16_t dp_rx_mon_get_rx_pkt_tlv_size(struct dp_soc *soc)
{
	return soc->rx_mon_pkt_tlv_size;
}
#endif

/**
 * dp_mon_adjust_frag_len() - Adjust fragment length for an MPDU/MSDU that
 *			      may be spread across multiple nbufs. Returns
 *			      the data length available in the current
 *			      fragmented buffer.
 * @soc: Datapath soc handle
 * @total_len: pointer to remaining data length.
 * @frag_len: pointer to data length in this fragment.
 * @l2_hdr_pad: l2 header padding
 */
static inline void dp_mon_adjust_frag_len(struct dp_soc *soc,
					  uint32_t *total_len,
					  uint32_t *frag_len,
					  uint16_t l2_hdr_pad)
{
	uint32_t rx_pkt_tlv_len = soc->rx_mon_pkt_tlv_size;

	if (*total_len >= (RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len)) {
		*frag_len = RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len -
					l2_hdr_pad;
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}
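
/*
 * Worked example (illustrative only, assuming an effective payload of
 * 1664 bytes per monitor buffer after the Rx pkt TLVs and a 2 byte
 * l2_hdr_pad): for a 4000 byte MSDU, successive calls yield
 *
 *	frag_len = 1662, total_len = 2338
 *	frag_len = 1662, total_len = 676
 *	frag_len = 676,  total_len = 0
 */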

/**
 * dp_rx_mon_frag_adjust_frag_len() - Adjust fragment length for an MPDU/MSDU
 * spread across multiple nbufs. Returns the data length available in the
 * current fragmented buffer.
 *
 * It takes max_limit as input for the current buffer (this changes based on
 * the decap type and on the buffer's position within the MSDU).
 *
 * If an MSDU is divided into multiple buffers, the per-buffer max limit is:
 *
 * Decap type Non-Raw
 * --------------------------------
 * |  1st  |  2nd  | ...  | Last   |
 * | 1662  |  1664 | 1664 | <=1664 |
 * --------------------------------
 *
 * Decap type Raw
 * --------------------------------
 * |  1st  |  2nd  | ...  | Last   |
 * | 1664  |  1664 | 1664 | <=1664 |
 * --------------------------------
 *
 * It also determines whether the current buffer has a placeholder to keep
 * the padding byte:
 *  --------------------------------
 * |       MAX LIMIT(1662/1664)     |
 *  --------------------------------
 * | Actual Data | Pad byte Pholder |
 *  --------------------------------
 *
 * @total_len: Remaining data length.
 * @frag_len:  Data length in this fragment.
 * @max_limit: Max limit of current buffer/MSDU.
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_mon_frag_adjust_frag_len(uint32_t *total_len, uint32_t *frag_len,
				    uint32_t max_limit)
{
	if (*total_len >= max_limit) {
		*frag_len = max_limit;
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}
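
/*
 * Illustrative only (hypothetical caller-side names): the reaping code
 * derives max_limit from the decap type and buffer position per the tables
 * above; the first buffer additionally excludes the decap-specific L2
 * header pad, e.g.
 *
 *	max_limit = tot_payload_len - (first_buffer ? l2_hdr_pad : 0);
 *	dp_rx_mon_frag_adjust_frag_len(&total_frag_len, &frag_len, max_limit);
 */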

/**
 * DP_RX_MON_GET_NBUF_FROM_DESC() - Get nbuf from desc
 * @rx_desc: RX descriptor
 *
 * Return: nbuf address (always NULL when DP_RX_MON_MEM_FRAG is enabled,
 *	   since buffers are tracked as frags rather than nbufs)
 */
#define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
	NULL

/**
 * dp_rx_mon_add_msdu_to_list_failure_handler() - Handler for nbuf buffer
 *                                                  attach failure
 *
 * @rx_tlv_hdr: rx_tlv_hdr
 * @pdev: struct dp_pdev *
 * @last: skb pointing to last skb in chained list at any moment
 * @head_msdu: parent skb in the chained list
 * @tail_msdu: Last skb in the chained list
 * @func_name: caller function name
 *
 * Return: void
 */
static inline void
dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr,
					   struct dp_pdev *pdev,
					   qdf_nbuf_t *last,
					   qdf_nbuf_t *head_msdu,
					   qdf_nbuf_t *tail_msdu,
					   const char *func_name)
{
	DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
	qdf_frag_free(rx_tlv_hdr);
	if (head_msdu)
		qdf_nbuf_list_free(*head_msdu);
	dp_err("[%s] failed to allocate subsequent parent buffer to hold all frag",
	       func_name);
	if (head_msdu)
		*head_msdu = NULL;
	if (last)
		*last = NULL;
	if (tail_msdu)
		*tail_msdu = NULL;
}

/**
 * dp_rx_mon_get_paddr_from_desc() - Get paddr from desc
 * @rx_desc: RX descriptor
 *
 * Return: Physical address of the buffer
 */
static inline
qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc)
{
	return rx_desc->paddr_buf_start;
}

/**
 * DP_RX_MON_IS_BUFFER_ADDR_NULL() - Check whether the buffer received from
 *				     HW is NULL
 * @rx_desc: RX descriptor
 *
 * Return: true if the buffer is NULL, otherwise false
 */
#define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \
	(!(rx_desc->rx_buf_start))

#define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \
	true

/**
 * dp_rx_mon_buffer_free() - Free nbuf or frag memory
 * Free nbuf if feature is disabled, else free frag.
 *
 * @rx_desc: Rx desc
 */
static inline void
dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc)
{
	qdf_frag_free(rx_desc->rx_buf_start);
}

/**
 * dp_rx_mon_buffer_unmap() - Unmap nbuf or frag memory
 * Unmap nbuf if feature is disabled, else unmap frag.
 *
 * @soc: struct dp_soc *
 * @rx_desc: struct dp_rx_desc *
 * @size: Size to be unmapped
 */
static inline void
dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
		       uint16_t size)
{
	qdf_mem_unmap_page(soc->osdev, rx_desc->paddr_buf_start,
			   size, QDF_DMA_FROM_DEVICE);
}

/**
 * dp_rx_mon_alloc_parent_buffer() - Allocate parent buffer to hold
 * radiotap header and accommodate all frag memory in nr_frag.
 *
 * @head_msdu: Ptr to hold allocated Msdu
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu)
{
	/*
	 * Headroom should accommodate the radiotap header and the
	 * protocol and flow tags for all frags. The length reserved
	 * for the radiotap header is 128 bytes, and the length
	 * reserved for the protocol and flow tags varies with
	 * QDF_NBUF_MAX_FRAGS.
	 */
	/*  -------------------------------------------------
	 * |       Protocol & Flow TAG      | Radiotap header|
	 * |                                |  Length(128 B) |
	 * |  ((4* QDF_NBUF_MAX_FRAGS) * 2) |                |
	 *  -------------------------------------------------
	 */

	*head_msdu = qdf_nbuf_alloc_no_recycler(DP_RX_MON_MAX_MONITOR_HEADER,
						DP_RX_MON_MAX_MONITOR_HEADER, 4);

	if (!(*head_msdu))
		return QDF_STATUS_E_FAILURE;

	qdf_mem_zero(qdf_nbuf_head(*head_msdu), qdf_nbuf_headroom(*head_msdu));

	/* Set *head_msdu->next as NULL as all msdus are
	 * mapped via nr frags
	 */
	qdf_nbuf_set_next(*head_msdu, NULL);

	return QDF_STATUS_SUCCESS;
}
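
/*
 * Illustrative usage (hypothetical local name): the parent nbuf carries no
 * linear payload of its own; MSDU data is attached later as page frags via
 * dp_rx_mon_add_msdu_to_list(), and the reserved headroom is consumed when
 * the radiotap header and protocol/flow tags are prepended, e.g.
 *
 *	qdf_nbuf_t head = NULL;
 *
 *	if (dp_rx_mon_alloc_parent_buffer(&head) != QDF_STATUS_SUCCESS)
 *		goto fail;
 *
 * where the fail path would typically invoke the failure handler above.
 */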

/**
 * dp_rx_mon_parse_desc_buffer() - Parse desc buffer and update frag info.
 * @dp_soc:             struct dp_soc*
 * @msdu_info:          struct hal_rx_msdu_desc_info*
 * @is_frag_p:          is_frag *
 * @total_frag_len_p:   Remaining frag len to be updated
 * @frag_len_p:         frag len
 * @l2_hdr_offset_p:    l2 hdr offset
 * @rx_desc_tlv:        rx_desc_tlv
 * @first_rx_desc_tlv:  pointer to hold the first fragment's rx_desc_tlv
 * @is_frag_non_raw_p:  Non raw frag
 * @data:               NBUF Data
 *
 * The code below parses the desc buffer, handles continuation frames,
 * adjusts the frag length and updates l2_hdr_padding.
 */
static inline void
dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc,
			    struct hal_rx_msdu_desc_info *msdu_info,
			    bool *is_frag_p, uint32_t *total_frag_len_p,
			    uint32_t *frag_len_p, uint16_t *l2_hdr_offset_p,
			    qdf_frag_t rx_desc_tlv,
			    void **first_rx_desc_tlv,
			    bool *is_frag_non_raw_p, void *data)
{
	struct hal_rx_mon_dest_buf_info frame_info;
	uint16_t tot_payload_len =
			RX_MONITOR_BUFFER_SIZE - dp_soc->rx_mon_pkt_tlv_size;

	if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) {
		/* First buffer of MSDU */
		if (!(*is_frag_p)) {
			/* Set total frag_len from msdu_len */
			*total_frag_len_p = msdu_info->msdu_len;

			*is_frag_p = true;
			if (HAL_HW_RX_DECAP_FORMAT_RAW ==
			    hal_rx_tlv_decap_format_get(dp_soc->hal_soc,
							rx_desc_tlv)) {
				*l2_hdr_offset_p =
					DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 1;
			} else {
				*l2_hdr_offset_p =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 0;
				*is_frag_non_raw_p = true;
			}
			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
						       frag_len_p,
						       tot_payload_len -
						       *l2_hdr_offset_p);

			frame_info.first_buffer = 1;
			frame_info.last_buffer = 0;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		} else {
			/*
			 * Continuation Middle frame
			 * Here max limit will be same for Raw and Non raw case.
			 */
			*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
						       frag_len_p,
						       tot_payload_len);

			/* Update frame info if is non raw frame */
			if (*is_frag_non_raw_p)
				frame_info.is_decap_raw = 0;
			else
				frame_info.is_decap_raw = 1;

			frame_info.first_buffer = 0;
			frame_info.last_buffer = 0;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		}
	} else {
		/*
		 * Last buffer of MSDU spread among multiple buffers
		 * Here max limit will be same for Raw and Non raw case.
		 */
		if (*is_frag_p) {
			*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;

			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
						       frag_len_p,
						       tot_payload_len);

			/* Update frame info if is non raw frame */
			if (*is_frag_non_raw_p)
				frame_info.is_decap_raw = 0;
			else
				frame_info.is_decap_raw = 1;

			frame_info.first_buffer = 0;
			frame_info.last_buffer = 1;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		} else {
			/* MSDU with single buffer */
			*frag_len_p = msdu_info->msdu_len;
			if (HAL_HW_RX_DECAP_FORMAT_RAW ==
			    hal_rx_tlv_decap_format_get(dp_soc->hal_soc,
							rx_desc_tlv)) {
				*l2_hdr_offset_p =
					DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 1;
			} else {
				*l2_hdr_offset_p =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 0;
			}

			frame_info.first_buffer = 1;
			frame_info.last_buffer = 1;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		}
		/* Reset bool after complete processing of MSDU */
		*is_frag_p = false;
		*is_frag_non_raw_p = false;
	}
}
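
/*
 * Summary of the dest-buffer info written into the TLV private area by the
 * function above (illustrative):
 *
 *	buffer position in MSDU		first_buffer	last_buffer
 *	first of a multi-buffer MSDU	     1		     0
 *	middle buffer			     0		     0
 *	last of a multi-buffer MSDU	     0		     1
 *	single-buffer MSDU		     1		     1
 */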

/**
 * dp_rx_mon_buffer_set_pktlen() - set pktlen for buffer
 * @msdu: MSDU
 * @size: MSDU size
 */
static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size)
{
}

/**
 * dp_rx_mon_add_msdu_to_list() - Add msdu to list and update head_msdu
 *      It will add the reaped buffer frag to the nr_frags of the parent msdu.
 * @soc: DP soc handle
 * @head_msdu: NULL if first time called else &msdu
 * @msdu: Msdu where frag address needs to be added via nr_frag
 * @last: Used to traverse in list if this feature is disabled.
 * @rx_desc_tlv: Frag address
 * @frag_len: Frag len
 * @l2_hdr_offset: l2 hdr padding
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu,
				      qdf_nbuf_t msdu, qdf_nbuf_t *last,
				      qdf_frag_t rx_desc_tlv, uint32_t frag_len,
				      uint32_t l2_hdr_offset)
{
	uint32_t num_frags;
	qdf_nbuf_t msdu_curr;

	/* Here head_msdu and *head_msdu must not be NULL */
	/* Don't add frag to skb if frag length is zero. Drop frame */
	if (qdf_unlikely(!frag_len || !head_msdu || !(*head_msdu))) {
		dp_err("[%s] frag_len[%d] || head_msdu[%pK] || *head_msdu is Null while adding frag to skb",
		       __func__, frag_len, head_msdu);
		return QDF_STATUS_E_FAILURE;
	}

	/* In case of first desc of MPDU, assign curr msdu to *head_msdu */
	if (!qdf_nbuf_get_nr_frags(*head_msdu))
		msdu_curr = *head_msdu;
	else
		msdu_curr = *last;

	/* Current msdu must not be NULL */
	if (qdf_unlikely(!msdu_curr)) {
		dp_err("[%s] Current msdu can't be Null while adding frag to skb",
		       __func__);
		return QDF_STATUS_E_FAILURE;
	}

	num_frags = qdf_nbuf_get_nr_frags(msdu_curr);
	if (num_frags < QDF_NBUF_MAX_FRAGS) {
		qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr,
				     soc->rx_mon_pkt_tlv_size,
				     frag_len + l2_hdr_offset,
				     RX_MONITOR_BUFFER_SIZE,
				     false);
		if (*last != msdu_curr)
			*last = msdu_curr;
		return QDF_STATUS_SUCCESS;
	}

	/* Execution will reach here only if num_frags == QDF_NBUF_MAX_FRAGS */
	msdu_curr = NULL;
	if ((dp_rx_mon_alloc_parent_buffer(&msdu_curr))
	    != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr, soc->rx_mon_pkt_tlv_size,
			     frag_len + l2_hdr_offset, RX_MONITOR_BUFFER_SIZE,
			     false);

	/* Add allocated nbuf in the chain */
	qdf_nbuf_set_next(*last, msdu_curr);

	/* Assign current msdu to last to avoid traversal */
	*last = msdu_curr;

	return QDF_STATUS_SUCCESS;
}
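
/*
 * Illustrative call site (hypothetical local names): each reaped monitor
 * destination buffer contributes one page fragment to the parent nbuf, and
 * a fresh parent is chained in transparently once QDF_NBUF_MAX_FRAGS
 * fragments have been attached:
 *
 *	if (dp_rx_mon_add_msdu_to_list(soc, &head_msdu, msdu, &last,
 *				       rx_desc_tlv, frag_len,
 *				       l2_hdr_offset) != QDF_STATUS_SUCCESS)
 *		dp_rx_mon_add_msdu_to_list_failure_handler(rx_desc_tlv, pdev,
 *							   &last, &head_msdu,
 *							   &tail_msdu,
 *							   __func__);
 */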

/**
 * dp_rx_mon_init_tail_msdu() - Initialize tail msdu
 *
 * @head_msdu: Parent buffer to hold MPDU data
 * @msdu: Msdu to be updated in tail_msdu
 * @last: last msdu
 * @tail_msdu: Last msdu
 */
static inline
void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu,
			      qdf_nbuf_t last, qdf_nbuf_t *tail_msdu)
{
	if (!head_msdu || !(*head_msdu)) {
		*tail_msdu = NULL;
		return;
	}

	if (last)
		qdf_nbuf_set_next(last, NULL);
	*tail_msdu = last;
}

/**
 * dp_rx_mon_remove_raw_frame_fcs_len() - Remove FCS length for Raw Frame
 *
 * If feature is disabled, then removal happens in restitch logic.
 *
 * @soc: Datapath soc handle
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 */
static inline
void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu)
{
	qdf_frag_t addr;

	if (qdf_unlikely(!head_msdu || !tail_msdu || !(*head_msdu)))
		return;

	/* If *head_msdu is valid, then *tail_msdu must be valid */
	/* If head_msdu is valid, then it must have nr_frags */
	/* If tail_msdu is valid, then it must have nr_frags */

	/* Strip FCS_LEN for Raw frame */
	addr = qdf_nbuf_get_frag_addr(*head_msdu, 0);
	addr -= soc->rx_mon_pkt_tlv_size;
	if (hal_rx_tlv_decap_format_get(soc->hal_soc, addr) ==
		HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(*tail_msdu,
			qdf_nbuf_get_nr_frags(*tail_msdu) - 1,
					-HAL_RX_FCS_LEN, 0);
	}
}

/**
 * dp_rx_mon_get_buffer_data() - Get data from desc buffer
 * @rx_desc: desc
 *
 * Return: address containing actual tlv content
 */
static inline
uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc)
{
	return rx_desc->rx_buf_start;
}

#else

#define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
	(rx_desc->nbuf)

static inline void
dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr,
					   struct dp_pdev *pdev,
					   qdf_nbuf_t *last,
					   qdf_nbuf_t *head_msdu,
					   qdf_nbuf_t *tail_msdu,
					   const char *func_name)
{
}

static inline
qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc)
{
	qdf_dma_addr_t paddr = 0;
	qdf_nbuf_t msdu = NULL;

	msdu = rx_desc->nbuf;
	if (msdu)
		paddr = qdf_nbuf_get_frag_paddr(msdu, 0);

	return paddr;
}

#define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \
	(!(rx_desc->nbuf))

#define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \
	(msdu)

static inline void
dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc)
{
	qdf_nbuf_free(rx_desc->nbuf);
}

static inline void
dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
		       uint16_t size)
{
	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
				     QDF_DMA_FROM_DEVICE, size);
}

static inline
QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu)
{
	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_WIFI_MONITOR_MODE_NO_MSDU_START_TLV_SUPPORT

#define RXDMA_DATA_DMA_BLOCK_SIZE 128
static inline void
dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc,
			    struct hal_rx_msdu_desc_info *msdu_info,
			    bool *is_frag_p, uint32_t *total_frag_len_p,
			    uint32_t *frag_len_p,
			    uint16_t *l2_hdr_offset_p,
			    qdf_frag_t rx_desc_tlv,
			    void **first_rx_desc_tlv,
			    bool *is_frag_non_raw_p, void *data)
{
	struct hal_rx_mon_dest_buf_info frame_info;
	uint32_t rx_pkt_tlv_len = dp_rx_mon_get_rx_pkt_tlv_size(dp_soc);

	/*
	 * HW structures call this L3 header padding
	 * -- even though this is actually the offset
	 * from the buffer beginning where the L2
	 * header begins.
	 */
	*l2_hdr_offset_p =
	hal_rx_msdu_end_l3_hdr_padding_get(dp_soc->hal_soc, data);

	if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) {
		/*
		 * Set l3_hdr_pad for first frag. This can be later
		 * changed based on decap format, detected in last frag
		 */
		*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
		if (!(*is_frag_p)) {
			*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
			*first_rx_desc_tlv = rx_desc_tlv;
		}

		*is_frag_p = true;
		*frag_len_p = (RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len -
			       *l2_hdr_offset_p) &
			      ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1);
		*total_frag_len_p += *frag_len_p;
	} else {
		if (hal_rx_tlv_decap_format_get(dp_soc->hal_soc, rx_desc_tlv) ==
		    HAL_HW_RX_DECAP_FORMAT_RAW)
			frame_info.is_decap_raw = 1;

		if (hal_rx_tlv_mpdu_len_err_get(dp_soc->hal_soc, rx_desc_tlv))
			frame_info.mpdu_len_err = 1;

		frame_info.l2_hdr_pad = hal_rx_msdu_end_l3_hdr_padding_get(
						dp_soc->hal_soc, rx_desc_tlv);

		if (*is_frag_p) {
			/* Last fragment of msdu */
			*frag_len_p = msdu_info->msdu_len - *total_frag_len_p;

			/* Set this in the first frag priv data */
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    *first_rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		} else {
			*frag_len_p = msdu_info->msdu_len;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		}
		*is_frag_p = false;
		*first_rx_desc_tlv = NULL;
	}
}
#else

static inline void
dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc,
			    struct hal_rx_msdu_desc_info *msdu_info,
			    bool *is_frag_p, uint32_t *total_frag_len_p,
			    uint32_t *frag_len_p,
			    uint16_t *l2_hdr_offset_p,
			    qdf_frag_t rx_desc_tlv,
			    qdf_frag_t first_rx_desc_tlv,
			    bool *is_frag_non_raw_p, void *data)
{
	/*
	 * HW structures call this L3 header padding
	 * -- even though this is actually the offset
	 * from the buffer beginning where the L2
	 * header begins.
	 */
	*l2_hdr_offset_p =
	hal_rx_msdu_end_l3_hdr_padding_get(dp_soc->hal_soc, data);

	if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) {
		if (!*(is_frag_p)) {
			*total_frag_len_p = msdu_info->msdu_len;
			*is_frag_p = true;
		}
		dp_mon_adjust_frag_len(dp_soc, total_frag_len_p, frag_len_p,
				       *l2_hdr_offset_p);
	} else {
		if (*is_frag_p) {
			dp_mon_adjust_frag_len(dp_soc, total_frag_len_p,
					       frag_len_p,
					       *l2_hdr_offset_p);
		} else {
			*frag_len_p = msdu_info->msdu_len;
		}
		*is_frag_p = false;
	}
}
#endif

static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size)
{
	qdf_nbuf_set_pktlen(msdu, size);
}

static inline
QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu,
				      qdf_nbuf_t msdu, qdf_nbuf_t *last,
				      qdf_frag_t rx_desc_tlv, uint32_t frag_len,
				      uint32_t l2_hdr_offset)
{
	if (head_msdu && !*head_msdu) {
		*head_msdu = msdu;
	} else {
		if (*last)
			qdf_nbuf_set_next(*last, msdu);
	}
	*last = msdu;
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu,
			      qdf_nbuf_t last, qdf_nbuf_t *tail_msdu)
{
	if (last)
		qdf_nbuf_set_next(last, NULL);

	*tail_msdu = msdu;
}

static inline
void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu)
{
}

static inline
uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc)
{
	qdf_nbuf_t msdu = NULL;
	uint8_t *data = NULL;

	msdu = rx_desc->nbuf;
	if (qdf_likely(msdu))
		data = qdf_nbuf_data(msdu);
	return data;
}

#endif

#ifndef WLAN_SOFTUMAC_SUPPORT
/**
 * dp_rx_cookie_2_mon_link_desc() - Retrieve Link descriptor based on target
 * @pdev: core physical device context
 * @buf_info: ptr to structure holding the buffer info
 * @mac_id: mac number
 *
 * Return: link descriptor address
 */
static inline
void *dp_rx_cookie_2_mon_link_desc(struct dp_pdev *pdev,
				   struct hal_buf_info *buf_info,
				   uint8_t mac_id)
{
	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
		return dp_rx_cookie_2_mon_link_desc_va(pdev, buf_info,
						       mac_id);

	return dp_rx_cookie_2_link_desc_va(pdev->soc, buf_info);
}

/**
 * dp_rx_monitor_link_desc_return() - Return Link descriptor based on target
 * @pdev: core physical device context
 * @p_last_buf_addr_info: MPDU Link descriptor
 * @mac_id: mac number
 * @bm_action: buffer manager action to be taken when returning the descriptor
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_rx_monitor_link_desc_return(struct dp_pdev *pdev,
					  hal_buff_addrinfo_t
					  p_last_buf_addr_info,
					  uint8_t mac_id, uint8_t bm_action)
{
	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
		return dp_rx_mon_link_desc_return(pdev, p_last_buf_addr_info,
						  mac_id);

	return dp_rx_link_desc_return_by_addr(pdev->soc, p_last_buf_addr_info,
					      bm_action);
}
#else
static inline
void *dp_rx_cookie_2_mon_link_desc(struct dp_pdev *pdev,
				   struct hal_buf_info *buf_info,
				   uint8_t mac_id)
{
	return dp_rx_cookie_2_mon_link_desc_va(pdev, buf_info, mac_id);
}

static inline
QDF_STATUS dp_rx_monitor_link_desc_return(struct dp_pdev *pdev,
					  hal_buff_addrinfo_t
					  p_last_buf_addr_info,
					  uint8_t mac_id, uint8_t bm_action)
{
	return dp_rx_mon_link_desc_return(pdev, p_last_buf_addr_info,
					  mac_id);
}
#endif

static inline bool dp_is_rxdma_dst_ring_common(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	return (soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev == 1);
}

/**
 * dp_rxdma_get_mon_dst_ring() - Return the pointer to rxdma_err_dst_ring
 *				 or mon_dst_ring based on the target
 * @pdev: core physical device context
 * @mac_for_pdev: mac_id number
 *
 * Return: ring address
 */
static inline
void *dp_rxdma_get_mon_dst_ring(struct dp_pdev *pdev,
				uint8_t mac_for_pdev)
{
	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
		return pdev->soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng;

	/* For targets with 1 RXDMA DST ring shared by both macs */
	if (dp_is_rxdma_dst_ring_common(pdev))
		return pdev->soc->rxdma_err_dst_ring[0].hal_srng;

	return pdev->soc->rxdma_err_dst_ring[mac_for_pdev].hal_srng;
}

/**
 * dp_rxdma_get_mon_buf_ring() - Return monitor buf ring address
 *				 based on target
 * @pdev: core physical device context
 * @mac_for_pdev: mac id number
 *
 * Return: ring address
 */
static inline
struct dp_srng *dp_rxdma_get_mon_buf_ring(struct dp_pdev *pdev,
					  uint8_t mac_for_pdev)
{
	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
		return &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];

	/* For MCL there is only 1 rx refill ring */
	return &pdev->soc->rx_refill_buf_ring[0];
}

/**
 * dp_rx_get_mon_desc() - Return Rx descriptor based on target
 * @soc: soc handle
 * @cookie: cookie value
 *
 * Return: Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_get_mon_desc(struct dp_soc *soc,
				      uint32_t cookie)
{
	if (soc->wlan_cfg_ctx->rxdma1_enable)
		return dp_rx_cookie_2_va_mon_buf(soc, cookie);

	return soc->arch_ops.dp_rx_desc_cookie_2_va(soc, cookie);
}

#ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_mon_htt_dest_srng_setup() - monitor dest srng setup
 * @soc: DP SOC handle
 * @pdev: DP PDEV handle
 * @mac_id: MAC ID
 * @mac_for_pdev: PDEV mac
 *
 * Return: QDF_STATUS_SUCCESS - Success, non-zero: Failure
 */
QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
				      struct dp_pdev *pdev,
				      int mac_id,
				      int mac_for_pdev);

/**
 * dp_mon_dest_rings_deinit() - deinit monitor dest rings
 * @pdev: DP PDEV handle
 * @lmac_id: MAC ID
 *
 * Return: None
 */
void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id);

/**
 * dp_mon_dest_rings_free() - free monitor dest rings
 * @pdev: DP PDEV handle
 * @lmac_id: MAC ID
 *
 * Return: None
 */
void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id);

/**
 * dp_mon_dest_rings_init() - init monitor dest rings
 * @pdev: DP PDEV handle
 * @lmac_id: MAC ID
 *
 * Return: QDF_STATUS_SUCCESS - Success, non-zero: Failure
 */
QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id);

/**
 * dp_mon_dest_rings_alloc() - allocate monitor dest rings
 * @pdev: DP PDEV handle
 * @lmac_id: MAC ID
 *
 * Return: QDF_STATUS_SUCCESS - Success, non-zero: Failure
 */
QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id);

#else
static inline
QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
				      struct dp_pdev *pdev,
				      int mac_id,
				      int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
{
}

static inline void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
{
}

static inline
QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* QCA_MONITOR_PKT_SUPPORT */

#endif /* _DP_RX_MON_1_0_H_ */