1 /*
2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #ifndef _DP_RX_H
21 #define _DP_RX_H
22
23 #include "hal_rx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include <qdf_tracepoint.h>
27 #include "dp_ipa.h"
28
29 #ifdef RXDMA_OPTIMIZATION
30 #ifndef RX_DATA_BUFFER_ALIGNMENT
31 #define RX_DATA_BUFFER_ALIGNMENT 128
32 #endif
33 #ifndef RX_MONITOR_BUFFER_ALIGNMENT
34 #define RX_MONITOR_BUFFER_ALIGNMENT 128
35 #endif
36 #else /* RXDMA_OPTIMIZATION */
37 #define RX_DATA_BUFFER_ALIGNMENT 4
38 #define RX_MONITOR_BUFFER_ALIGNMENT 4
39 #endif /* RXDMA_OPTIMIZATION */
40
41 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
42 #define DP_WBM2SW_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
43 /* RBM value used for re-injecting defragmented packets into REO */
44 #define DP_DEFRAG_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
45 #endif
46
47 /* Max buffers in the invalid peer SG list */
48 #define DP_MAX_INVALID_BUFFERS 10
49 #ifdef DP_INVALID_PEER_ASSERT
50 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
51 do { \
52 qdf_assert_always(!(head)); \
53 qdf_assert_always(!(tail)); \
54 } while (0)
55 #else
56 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
57 #endif
58
59 #define RX_BUFFER_RESERVATION 0
60
61 #define DP_DEFAULT_NOISEFLOOR (-96)
62
63 #define DP_RX_DESC_MAGIC 0xdec0de
64
65 #define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
66 #define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
67 #define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
68 #define dp_rx_info(params...) \
69 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
70 #define dp_rx_info_rl(params...) \
71 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
72 #define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)
73 #define dp_rx_err_err(params...) \
74 QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
75
76 /**
77 * enum dp_rx_desc_state
78 *
79 * @RX_DESC_REPLENISHED: rx desc replenished
80 * @RX_DESC_IN_FREELIST: rx desc in freelist
81 */
82 enum dp_rx_desc_state {
83 RX_DESC_REPLENISHED,
84 RX_DESC_IN_FREELIST,
85 };
86
87 #ifndef QCA_HOST_MODE_WIFI_DISABLED
88 /**
89 * struct dp_rx_desc_dbg_info
90 *
91 * @freelist_caller: name of the function that put the
92 *		   rx desc in the freelist
93 * @freelist_ts: timestamp when the rx desc is put in
94 * a freelist
95 * @replenish_caller: name of the function that last
96 * replenished the rx desc
97 * @replenish_ts: last replenish timestamp
98 * @prev_nbuf: previous nbuf info
99 * @prev_nbuf_data_addr: previous nbuf data address
100 */
101 struct dp_rx_desc_dbg_info {
102 char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
103 uint64_t freelist_ts;
104 char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
105 uint64_t replenish_ts;
106 qdf_nbuf_t prev_nbuf;
107 uint8_t *prev_nbuf_data_addr;
108 };
109
110 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
111
112 /**
113 * struct dp_rx_desc
114 *
115 * @nbuf: VA of the "skb" posted
116 * @rx_buf_start: VA of the original Rx buffer, before
117 * movement of any skb->data pointer
118 * @paddr_buf_start: PA of the original Rx buffer, before
119 * movement of any frag pointer
120 * @cookie: index into the sw array which holds
121 * the sw Rx descriptors
122 * Cookie space is 21 bits:
123 * lower 18 bits -- index
124 * upper 3 bits -- pool_id
125 * @pool_id: pool ID from which this descriptor was allocated.
126 *	     Can only be used if there is no flow
127 *	     steering
128 * @chip_id: MLO chip ID;
129 *	     valid and used only in case of multi-chip MLO
130 * @reuse_nbuf: VA of the "skb" which is being reused
131 * @magic:
132 * @nbuf_data_addr: VA of nbuf data posted
133 * @dbg_info:
134 * @prev_paddr_buf_start: paddr of the prev nbuf attach to rx_desc
135 * @in_use: rx_desc is in use
136 * @unmapped: used to mark the rx_desc as unmapped if the corresponding
137 *	       nbuf is already unmapped
138 * @in_err_state: Nbuf sanity failed for this descriptor.
139 * @has_reuse_nbuf: the nbuf associated with this desc is also saved in
140 * reuse_nbuf field
141 * @msdu_done_fail: this particular rx_desc was dequeued from REO with
142 * msdu_done bit not set in data buffer.
143 */
144 struct dp_rx_desc {
145 qdf_nbuf_t nbuf;
146 #ifdef WLAN_SUPPORT_PPEDS
147 qdf_nbuf_t reuse_nbuf;
148 #endif
149 uint8_t *rx_buf_start;
150 qdf_dma_addr_t paddr_buf_start;
151 uint32_t cookie;
152 uint8_t pool_id;
153 uint8_t chip_id;
154 #ifdef RX_DESC_DEBUG_CHECK
155 uint32_t magic;
156 uint8_t *nbuf_data_addr;
157 struct dp_rx_desc_dbg_info *dbg_info;
158 qdf_dma_addr_t prev_paddr_buf_start;
159 #endif
160 uint8_t in_use:1,
161 unmapped:1,
162 in_err_state:1,
163 has_reuse_nbuf:1,
164 msdu_done_fail:1;
165 };
166
167 #ifndef QCA_HOST_MODE_WIFI_DISABLED
168 #ifdef ATH_RX_PRI_SAVE
169 #define DP_RX_TID_SAVE(_nbuf, _tid) \
170 (qdf_nbuf_set_priority(_nbuf, _tid))
171 #else
172 #define DP_RX_TID_SAVE(_nbuf, _tid)
173 #endif
174
175 /* RX Descriptor Multi Page memory alloc related */
176 #define DP_RX_DESC_OFFSET_NUM_BITS 8
177 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8
178 #define DP_RX_DESC_POOL_ID_NUM_BITS 4
179
180 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
181 #define DP_RX_DESC_POOL_ID_SHIFT \
182 (DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
183 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
184 (((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
185 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
186 (((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
187 DP_RX_DESC_PAGE_ID_SHIFT)
188 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
189 ((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
190 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
191 (((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
192 DP_RX_DESC_POOL_ID_SHIFT)
193 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
194 (((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
195 DP_RX_DESC_PAGE_ID_SHIFT)
196 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
197 ((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
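/*
 * Worked example of the multi-page cookie layout above (illustrative
 * values only, not taken from any real ring entry): offset occupies
 * bits [7:0], page_id bits [15:8] and pool_id bits [19:16].
 *
 *	uint32_t cookie = (2 << DP_RX_DESC_POOL_ID_SHIFT) |
 *			  (7 << DP_RX_DESC_PAGE_ID_SHIFT) | 0x21;
 *
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie); // 2
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie); // 7
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);  // 0x21
 */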
198
199 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
200
201 #define RX_DESC_COOKIE_INDEX_SHIFT 0
202 #define RX_DESC_COOKIE_INDEX_MASK 0x3ffff /* 18 bits */
203 #define RX_DESC_COOKIE_POOL_ID_SHIFT 18
204 #define RX_DESC_COOKIE_POOL_ID_MASK 0x1c0000
205
206 #define DP_RX_DESC_COOKIE_MAX \
207 (RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
208
209 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie) \
210 (((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >> \
211 RX_DESC_COOKIE_POOL_ID_SHIFT)
212
213 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie) \
214 (((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >> \
215 RX_DESC_COOKIE_INDEX_SHIFT)
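/*
 * Worked example of the 21-bit cookie layout above (illustrative values
 * only): pool_id 3 and index 5 encode as 0xC0005.
 *
 *	uint32_t cookie = (3 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 5;
 *
 *	DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); // 3
 *	DP_RX_DESC_COOKIE_INDEX_GET(cookie);   // 5
 */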
216
217 #define dp_rx_add_to_free_desc_list(head, tail, new) \
218 __dp_rx_add_to_free_desc_list(head, tail, new, __func__)
219
220 #define dp_rx_add_to_free_desc_list_reuse(head, tail, new) \
221 __dp_rx_add_to_free_desc_list_reuse(head, tail, new, __func__)
222
223 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
224 num_buffers, desc_list, tail, req_only) \
225 __dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
226 num_buffers, desc_list, tail, req_only, \
227 false, __func__)
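/*
 * Minimal caller-side sketch for the replenish wrapper above (variable
 * names such as rx_bufs_used are hypothetical; real callers are the rx
 * init path and dp_rx_process()):
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				rx_bufs_used, &desc_list, &tail, false);
 */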
228
229 #ifdef WLAN_SUPPORT_RX_FISA
230 /**
231 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
232 * @nbuf: pkt skb pointer
233 * @l3_padding: l3 padding
234 *
235 * Return: None
236 */
237 static inline
238 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
239 {
240 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
241 }
242 #else
243 static inline
244 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
245 {
246 }
247 #endif
248
249 #ifdef DP_RX_SPECIAL_FRAME_NEED
250 /**
251 * dp_rx_is_special_frame() - check if the RX frame is a special frame
252 *
253 * @nbuf: RX skb pointer
254 * @frame_mask: the mask for special frame needed
255 *
256 * Check whether the RX frame matches any of the special frame types in the mask
257 *
258 * Return: true - special frame needed, false - no
259 */
260 static inline
261 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
262 {
263 if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
264 qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
265 ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
266 qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
267 ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
268 qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
269 ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
270 qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)) ||
271 ((frame_mask & FRAME_MASK_DNS_QUERY) &&
272 qdf_nbuf_data_is_dns_query(nbuf)) ||
273 ((frame_mask & FRAME_MASK_DNS_RESP) &&
274 qdf_nbuf_data_is_dns_response(nbuf)))
275 return true;
276
277 return false;
278 }
279
280 /**
281 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
282 * if matches mask
283 *
284 * @soc: Datapath soc handler
285 * @peer: pointer to DP peer
286 * @nbuf: pointer to the skb of RX frame
287 * @frame_mask: the mask for special frame needed
288 * @rx_tlv_hdr: start of rx tlv header
289 *
290 * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
291 * single nbuf is expected.
292 *
293 * Return: true - nbuf has been delivered to stack, false - not.
294 */
295 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
296 qdf_nbuf_t nbuf, uint32_t frame_mask,
297 uint8_t *rx_tlv_hdr);
298 #else
299 static inline
300 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
301 {
302 return false;
303 }
304
305 static inline
306 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
307 qdf_nbuf_t nbuf, uint32_t frame_mask,
308 uint8_t *rx_tlv_hdr)
309 {
310 return false;
311 }
312 #endif
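/*
 * Illustrative caller-side sketch for the special-frame helpers above
 * (the frame_mask value shown is hypothetical, not a prescription of
 * what the datapath builds): only frames matching the mask are handed
 * to dp_rx_deliver_special_frame().
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, frame_mask))
 *		dp_rx_deliver_special_frame(soc, txrx_peer, nbuf,
 *					    frame_mask, rx_tlv_hdr);
 */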
313
314 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
315 /**
316 * dp_rx_data_is_specific() - Used to exclude specific frames
317 *			      that are not suitable for deriving rx
318 *			      stats such as rate, mcs, nss, etc.
319 *
320 * @hal_soc_hdl: soc handler
321 * @rx_tlv_hdr: rx tlv header
322 * @nbuf: RX skb pointer
323 *
324 * Return: true - a specific frame not suitable
325 * for getting rx stats from it.
326 * false - a common frame suitable for
327 * getting rx stats from it.
328 */
329 static inline
330 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
331 uint8_t *rx_tlv_hdr,
332 qdf_nbuf_t nbuf)
333 {
334 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
335 return true;
336
337 if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
338 return true;
339
340 if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
341 return true;
342
343 /* ARP and EAPOL frames are neither IPv4 ETH nor IPv6 ETH at the L3 level */
344 if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
345 QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
346 if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
347 return true;
348 } else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
349 QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
350 if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
351 return true;
352 } else {
353 return true;
354 }
355 return false;
356 }
357 #else
358 static inline
359 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
360 uint8_t *rx_tlv_hdr,
361 qdf_nbuf_t nbuf)
362
363 {
364 /*
365 * default return is true to make sure that rx stats
366 * will not be handled when this feature is disabled
367 */
368 return true;
369 }
370 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
371
372 #ifndef QCA_HOST_MODE_WIFI_DISABLED
373 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
374 static inline
375 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
376 qdf_nbuf_t nbuf, uint8_t link_id)
377 {
378 if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
379 qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
380 DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
381 rx.intra_bss.mdns_no_fwd,
382 1, link_id);
383 return false;
384 }
385 return true;
386 }
387 #else
388 static inline
389 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
390 qdf_nbuf_t nbuf, uint8_t link_id)
391 {
392 return true;
393 }
394 #endif
395 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
396
397 /* DOC: Offset to obtain LLC hdr
398 *
399 * In the case of a Wifi parse error,
400 * to reach the LLC header from the beginning
401 * of the VLAN tag we need to skip 8 bytes:
402 * Vlan_tag(4) + length(2) + length added
403 * by HW(2) = 8 bytes.
404 */
405 #define DP_SKIP_VLAN 8
406
407 #ifndef QCA_HOST_MODE_WIFI_DISABLED
408
409 /**
410 * struct dp_rx_cached_buf - rx cached buffer
411 * @node: linked list node
412 * @buf: skb buffer
413 */
414 struct dp_rx_cached_buf {
415 qdf_list_node_t node;
416 qdf_nbuf_t buf;
417 };
418
419 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
420
421 /**
422 * dp_rx_xor_block() - xor block of data
423 * @b: destination data block
424 * @a: source data block
425 * @len: length of the data to process
426 *
427 * Return: None
428 */
429 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
430 {
431 qdf_size_t i;
432
433 for (i = 0; i < len; i++)
434 b[i] ^= a[i];
435 }
436
437 /**
438 * dp_rx_rotl() - rotate the bits left
439 * @val: unsigned integer input value
440 * @bits: number of bits
441 *
442 * Return: Integer with left rotated by number of 'bits'
443 */
444 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
445 {
446 return (val << bits) | (val >> (32 - bits));
447 }
448
449 /**
450 * dp_rx_rotr() - rotate the bits right
451 * @val: unsigned integer input value
452 * @bits: number of bits
453 *
454 * Return: Integer with right rotated by number of 'bits'
455 */
456 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
457 {
458 return (val >> bits) | (val << (32 - bits));
459 }
460
461 /**
462 * dp_set_rx_queue() - set queue_mapping in skb
463 * @nbuf: skb
464 * @queue_id: rx queue_id
465 *
466 * Return: void
467 */
468 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
469 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
470 {
471 qdf_nbuf_record_rx_queue(nbuf, queue_id);
472 return;
473 }
474 #else
475 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
476 {
477 }
478 #endif
479
480 /**
481 * dp_rx_xswap() - swap adjacent bytes within each 16-bit half
482 * @val: unsigned integer input value
483 *
484 * Return: Integer with adjacent bytes swapped
485 */
486 static inline uint32_t dp_rx_xswap(uint32_t val)
487 {
488 return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
489 }
490
491 /**
492 * dp_rx_get_le32_split() - get little endian 32 bits split
493 * @b0: byte 0
494 * @b1: byte 1
495 * @b2: byte 2
496 * @b3: byte 3
497 *
498 * Return: Integer with split little endian 32 bits
499 */
500 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
501 uint8_t b3)
502 {
503 return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
504 }
505
506 /**
507 * dp_rx_get_le32() - get little endian 32 bits
508 * @p: pointer to the source byte array holding the little endian value
509 *
510 * Return: Integer with little endian 32 bits
511 */
512 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
513 {
514 return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
515 }
516
517 /**
518 * dp_rx_put_le32() - put little endian 32 bits
519 * @p: destination char array
520 * @v: source 32-bit integer
521 *
522 * Return: None
523 */
524 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
525 {
526 p[0] = (v) & 0xff;
527 p[1] = (v >> 8) & 0xff;
528 p[2] = (v >> 16) & 0xff;
529 p[3] = (v >> 24) & 0xff;
530 }
531
532 /* Michael MIC block function */
533 #define dp_rx_michael_block(l, r) \
534 do { \
535 r ^= dp_rx_rotl(l, 17); \
536 l += r; \
537 r ^= dp_rx_xswap(l); \
538 l += r; \
539 r ^= dp_rx_rotl(l, 3); \
540 l += r; \
541 r ^= dp_rx_rotr(l, 2); \
542 l += r; \
543 } while (0)
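/*
 * Minimal sketch of how the helpers above combine for one Michael MIC
 * block round (illustrative only; key and data are hypothetical byte
 * pointers and the real MIC computation lives in the defrag path):
 * seed (l, r) from the 8-byte key, then mix in one 32-bit little endian
 * word of payload per round.
 *
 *	uint32_t l = dp_rx_get_le32(key);
 *	uint32_t r = dp_rx_get_le32(key + 4);
 *
 *	l ^= dp_rx_get_le32(data);
 *	dp_rx_michael_block(l, r);
 */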
544
545 /**
546 * union dp_rx_desc_list_elem_t - element of the rx descriptor free list
547 *
548 * @next: Next pointer to form free list
549 * @rx_desc: DP Rx descriptor
550 */
551 union dp_rx_desc_list_elem_t {
552 union dp_rx_desc_list_elem_t *next;
553 struct dp_rx_desc rx_desc;
554 };
555
556 #ifdef RX_DESC_MULTI_PAGE_ALLOC
557 /**
558 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
559 * @page_id: Page ID
560 * @offset: Offset of the descriptor element
561 * @rx_pool: RX pool
562 *
563 * Return: RX descriptor element
564 */
565 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
566 struct rx_desc_pool *rx_pool);
567
568 static inline
569 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
570 struct rx_desc_pool *pool,
571 uint32_t cookie)
572 {
573 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
574 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
575 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
576 struct rx_desc_pool *rx_desc_pool;
577 union dp_rx_desc_list_elem_t *rx_desc_elem;
578
579 if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
580 return NULL;
581
582 rx_desc_pool = &pool[pool_id];
583 rx_desc_elem = (union dp_rx_desc_list_elem_t *)
584 (rx_desc_pool->desc_pages.cacheable_pages[page_id] +
585 rx_desc_pool->elem_size * offset);
586
587 return &rx_desc_elem->rx_desc;
588 }
589
590 static inline
591 struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc,
592 struct rx_desc_pool *pool,
593 uint32_t cookie)
594 {
595 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
596 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
597 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
598 struct rx_desc_pool *rx_desc_pool;
599 union dp_rx_desc_list_elem_t *rx_desc_elem;
600
601 if (qdf_unlikely(pool_id >= NUM_RXDMA_STATUS_RINGS_PER_PDEV))
602 return NULL;
603
604 rx_desc_pool = &pool[pool_id];
605 rx_desc_elem = (union dp_rx_desc_list_elem_t *)
606 (rx_desc_pool->desc_pages.cacheable_pages[page_id] +
607 rx_desc_pool->elem_size * offset);
608
609 return &rx_desc_elem->rx_desc;
610 }
611
612 /**
613 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
614 * the Rx descriptor on Rx DMA source ring buffer
615 * @soc: core txrx main context
616 * @cookie: cookie used to lookup virtual address
617 *
618 * Return: Pointer to the Rx descriptor
619 */
620 static inline
621 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
622 uint32_t cookie)
623 {
624 return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
625 }
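/*
 * Typical lookup sketch (illustrative only; real callers read
 * rx_buf_cookie from the REO/WBM ring entry via HAL before calling
 * this):
 *
 *	struct dp_rx_desc *rx_desc;
 *
 *	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *	if (qdf_unlikely(!rx_desc))
 *		return;
 */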
626
627 /**
628 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
629 * the Rx descriptor on monitor ring buffer
630 * @soc: core txrx main context
631 * @cookie: cookie used to lookup virtual address
632 *
633 * Return: Pointer to the Rx descriptor
634 */
635 static inline
636 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
637 uint32_t cookie)
638 {
639 return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
640 }
641
642 /**
643 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
644 * the Rx descriptor on monitor status ring buffer
645 * @soc: core txrx main context
646 * @cookie: cookie used to lookup virtual address
647 *
648 * Return: Pointer to the Rx descriptor
649 */
650 static inline
651 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
652 uint32_t cookie)
653 {
654 return dp_get_rx_mon_status_desc_from_cookie(soc,
655 &soc->rx_desc_status[0],
656 cookie);
657 }
658 #else
659
660 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
661 uint32_t pool_size,
662 struct rx_desc_pool *rx_desc_pool);
663
664 /**
665 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
666 * the Rx descriptor on Rx DMA source ring buffer
667 * @soc: core txrx main context
668 * @cookie: cookie used to lookup virtual address
669 *
670 * Return: void *: Virtual Address of the Rx descriptor
671 */
672 static inline
673 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
674 {
675 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
676 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
677 struct rx_desc_pool *rx_desc_pool;
678
679 if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
680 return NULL;
681
682 rx_desc_pool = &soc->rx_desc_buf[pool_id];
683
684 if (qdf_unlikely(index >= rx_desc_pool->pool_size))
685 return NULL;
686
687 return &rx_desc_pool->array[index].rx_desc;
688 }
689
690 /**
691 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
692 * the Rx descriptor on monitor ring buffer
693 * @soc: core txrx main context
694 * @cookie: cookie used to lookup virtual address
695 *
696 * Return: void *: Virtual Address of the Rx descriptor
697 */
698 static inline
699 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
700 {
701 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
702 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
703 /* TODO */
704 /* Add sanity for pool_id & index */
705 return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
706 }
707
708 /**
709 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
710 * the Rx descriptor on monitor status ring buffer
711 * @soc: core txrx main context
712 * @cookie: cookie used to lookup virtual address
713 *
714 * Return: void *: Virtual Address of the Rx descriptor
715 */
716 static inline
717 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
718 {
719 uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
720 uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
721 /* TODO */
722 /* Add sanity for pool_id & index */
723 return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
724 }
725 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
726
727 #ifndef QCA_HOST_MODE_WIFI_DISABLED
728
729 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
730 {
731 return vdev->ap_bridge_enabled;
732 }
733
734 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
735 static inline QDF_STATUS
736 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
737 {
738 if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
739 return QDF_STATUS_E_FAILURE;
740
741 HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
742 return QDF_STATUS_SUCCESS;
743 }
744
745 /**
746 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
747 * field in ring descriptor
748 * @ring_desc: ring descriptor
749 *
750 * Return: None
751 */
752 static inline void
753 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
754 {
755 HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
756 }
757 #else
758 static inline QDF_STATUS
759 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
760 {
761 return QDF_STATUS_SUCCESS;
762 }
763
764 static inline void
765 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
766 {
767 }
768 #endif
769
770 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
771
772 #if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
773 defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
774 /**
775 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
776 * @soc: dp soc ref
777 * @cookie: Rx buf SW cookie value
778 *
779 * Return: true if cookie is valid else false
780 */
781 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
782 uint32_t cookie)
783 {
784 uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
785 uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
786 uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
787 struct rx_desc_pool *rx_desc_pool;
788
789 if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
790 goto fail;
791
792 rx_desc_pool = &soc->rx_desc_buf[pool_id];
793
794 if (page_id >= rx_desc_pool->desc_pages.num_pages ||
795 offset >= rx_desc_pool->desc_pages.num_element_per_page)
796 goto fail;
797
798 return true;
799
800 fail:
801 DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
802 return false;
803 }
804 #else
805 /**
806 * dp_rx_is_sw_cookie_valid() - check whether SW cookie valid
807 * @soc: dp soc ref
808 * @cookie: Rx buf SW cookie value
809 *
810 * When multi page alloc is disabled, SW cookie validity is
811 * checked while fetching the Rx descriptor, so there is no need to check here
812 *
813 * Return: true if cookie is valid else false
814 */
815 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
816 uint32_t cookie)
817 {
818 return true;
819 }
820 #endif
821
822 /**
823 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
824 * rx descriptor pool
825 * @rx_desc_pool: rx descriptor pool pointer
826 *
827 * Return: QDF_STATUS QDF_STATUS_SUCCESS
828 * QDF_STATUS_E_NOMEM
829 */
830 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
831
832 /**
833 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
834 * descriptors
835 * @soc: core txrx main context
836 * @pool_size: number of rx descriptors (size of the pool)
837 * @rx_desc_pool: rx descriptor pool pointer
838 *
839 * Return: QDF_STATUS QDF_STATUS_SUCCESS
840 * QDF_STATUS_E_NOMEM
841 * QDF_STATUS_E_FAULT
842 */
843 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
844 uint32_t pool_size,
845 struct rx_desc_pool *rx_desc_pool);
846
847 /**
848 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
849 * @soc: core txrx main context
850 * @pool_id: pool_id which is one of 3 mac_ids
851 * @pool_size: size of the rx descriptor pool
852 * @rx_desc_pool: rx descriptor pool pointer
853 *
854 * Convert the pool of memory into a list of rx descriptors and create
855 * locks to access this list of rx descriptors.
856 *
857 */
858 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
859 uint32_t pool_size,
860 struct rx_desc_pool *rx_desc_pool);
861
862 /**
863 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
864 * freelist.
865 * @soc: core txrx main context
866 * @local_desc_list: local desc list provided by the caller
867 * @tail: pointer to the tail of the local desc list
868 * @pool_id: pool_id which is one of 3 mac_ids
869 * @rx_desc_pool: rx descriptor pool pointer
870 */
871 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
872 union dp_rx_desc_list_elem_t **local_desc_list,
873 union dp_rx_desc_list_elem_t **tail,
874 uint16_t pool_id,
875 struct rx_desc_pool *rx_desc_pool);
876
877 /**
878 * dp_rx_get_free_desc_list() - provide a list of descriptors from
879 * the free rx desc pool.
880 * @soc: core txrx main context
881 * @pool_id: pool_id which is one of 3 mac_ids
882 * @rx_desc_pool: rx descriptor pool pointer
883 * @num_descs: number of descs requested from freelist
884 * @desc_list: attach the descs to this list (output parameter)
885 * @tail: pointer to the last desc of the free list (output parameter)
886 *
887 * Return: number of descs allocated from free list.
888 */
889 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
890 struct rx_desc_pool *rx_desc_pool,
891 uint16_t num_descs,
892 union dp_rx_desc_list_elem_t **desc_list,
893 union dp_rx_desc_list_elem_t **tail);
894
895 /**
896 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
897 * pool
898 * @pdev: core txrx pdev context
899 *
900 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
901 * QDF_STATUS_E_NOMEM
902 */
903 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
904
905 /**
906 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
907 * @pdev: core txrx pdev context
908 */
909 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
910
911 /**
912 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
913 * @pdev: core txrx pdev context
914 *
915 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
916 * QDF_STATUS_E_NOMEM
917 */
918 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
919
920 /**
921 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
922 * @pdev: core txrx pdev context
923 *
924 * This function resets the freelist of rx descriptors and destroys locks
925 * associated with this list of descriptors.
926 */
927 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
928
929 void dp_rx_desc_pool_deinit(struct dp_soc *soc,
930 struct rx_desc_pool *rx_desc_pool,
931 uint32_t pool_id);
932
933 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
934
935 /**
936 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
937 * @pdev: core txrx pdev context
938 *
939 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
940 * QDF_STATUS_E_NOMEM
941 */
942 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
943
944 /**
945 * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
946 * @pdev: core txrx pdev context
947 */
948 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
949
950 void dp_rx_pdev_detach(struct dp_pdev *pdev);
951
952 /**
953 * dp_print_napi_stats() - NAPI stats
954 * @soc: soc handle
955 */
956 void dp_print_napi_stats(struct dp_soc *soc);
957
958 /**
959 * dp_rx_vdev_detach() - detach vdev from dp rx
960 * @vdev: virtual device instance
961 *
962 * Return: QDF_STATUS_SUCCESS: success
963 * QDF_STATUS_E_RESOURCES: Error return
964 */
965 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);
966
967 #ifndef QCA_HOST_MODE_WIFI_DISABLED
968
969 uint32_t
970 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
971 uint8_t reo_ring_num,
972 uint32_t quota);
973
974 /**
975 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
976 * multiple nbufs.
977 * @soc: core txrx main context
978 * @nbuf: pointer to the first msdu of an amsdu.
979 *
980 * This function implements the creation of RX frag_list for cases
981 * where an MSDU is spread across multiple nbufs.
982 *
983 * Return: returns the head nbuf which contains complete frag_list.
984 */
985 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);
986
987 /**
988 * dp_rx_is_sg_supported() - check whether SG (scatter-gather) packet processing is supported.
989 *
990 * Return: returns true when processing is supported else false.
991 */
992 bool dp_rx_is_sg_supported(void);
993
994 /**
995 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
996 * de-initialization of wifi module.
997 *
998 * @soc: core txrx main context
999 * @pool_id: pool_id which is one of 3 mac_ids
1000 * @rx_desc_pool: rx descriptor pool pointer
1001 *
1002 * Return: None
1003 */
1004 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
1005 struct rx_desc_pool *rx_desc_pool);
1006
1007 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1008
1009 /**
1010 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
1011 * de-initialization of wifi module.
1012 *
1013 * @soc: core txrx main context
1014 * @rx_desc_pool: rx descriptor pool pointer
1015 * @is_mon_pool: true if this is a monitor pool
1016 *
1017 * Return: None
1018 */
1019 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
1020 struct rx_desc_pool *rx_desc_pool,
1021 bool is_mon_pool);
1022
1023 #ifdef DP_RX_MON_MEM_FRAG
1024 /**
1025 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
1026 * de-initialization of wifi module.
1027 *
1028 * @soc: core txrx main context
1029 * @rx_desc_pool: rx descriptor pool pointer
1030 *
1031 * Return: None
1032 */
1033 void dp_rx_desc_frag_free(struct dp_soc *soc,
1034 struct rx_desc_pool *rx_desc_pool);
1035 #else
1036 static inline
1037 void dp_rx_desc_frag_free(struct dp_soc *soc,
1038 struct rx_desc_pool *rx_desc_pool)
1039 {
1040 }
1041 #endif
1042 /**
1043 * dp_rx_desc_pool_free() - free the sw rx desc array called during
1044 * de-initialization of wifi module.
1045 *
1046 * @soc: core txrx main context
1047 * @rx_desc_pool: rx descriptor pool pointer
1048 *
1049 * Return: None
1050 */
1051 void dp_rx_desc_pool_free(struct dp_soc *soc,
1052 struct rx_desc_pool *rx_desc_pool);
1053
1054 /**
1055 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
1056 * pkts to RAW mode simulation to
1057 * decapsulate the pkt.
1058 * @vdev: vdev on which RAW mode is enabled
1059 * @nbuf_list: list of RAW pkts to process
1060 * @peer: peer object from which the pkt is rx
1061 * @link_id: link Id on which the packet is received
1062 *
1063 * Return: void
1064 */
1065 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
1066 struct dp_txrx_peer *peer, uint8_t link_id);
1067
1068 #ifdef RX_DESC_LOGGING
1069 /**
1070 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
1071 * structure
1072 * @rx_desc: rx descriptor pointer
1073 *
1074 * Return: None
1075 */
1076 static inline
1077 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1078 {
1079 rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
1080 }
1081
1082 /**
1083 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
1084 * structure memory
1085 * @rx_desc: rx descriptor pointer
1086 *
1087 * Return: None
1088 */
1089 static inline
1090 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1091 {
1092 qdf_mem_free(rx_desc->dbg_info);
1093 }
1094
1095 /**
1096 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
1097 * structure memory
1098 * @rx_desc: rx descriptor pointer
1099 * @func_name: name of calling function
1100 * @flag:
1101 *
1102 * Return: None
1103 */
1104 static
1105 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1106 const char *func_name, uint8_t flag)
1107 {
1108 struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;
1109
1110 if (!info)
1111 return;
1112
1113 if (flag == RX_DESC_REPLENISHED) {
1114 qdf_str_lcopy(info->replenish_caller, func_name,
1115 QDF_MEM_FUNC_NAME_SIZE);
1116 info->replenish_ts = qdf_get_log_timestamp();
1117 } else {
1118 qdf_str_lcopy(info->freelist_caller, func_name,
1119 QDF_MEM_FUNC_NAME_SIZE);
1120 info->freelist_ts = qdf_get_log_timestamp();
1121 info->prev_nbuf = rx_desc->nbuf;
1122 info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
1123 rx_desc->nbuf_data_addr = NULL;
1124 }
1125 }
1126 #else
1127
1128 static inline
1129 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1130 {
1131 }
1132
1133 static inline
1134 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1135 {
1136 }
1137
1138 static inline
1139 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1140 const char *func_name, uint8_t flag)
1141 {
1142 }
1143 #endif /* RX_DESC_LOGGING */
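/*
 * Minimal lifecycle sketch for the debug-info helpers above (all of
 * them compile to no-ops unless RX_DESC_LOGGING is enabled; rx_desc is
 * a hypothetical descriptor pointer):
 *
 *	dp_rx_desc_alloc_dbg_info(rx_desc);                // pool init
 *	dp_rx_desc_update_dbg_info(rx_desc, __func__,
 *				   RX_DESC_REPLENISHED);   // on replenish
 *	dp_rx_desc_free_dbg_info(rx_desc);                 // pool deinit
 */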
1144
1145 /**
1146 * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
1147 *
1148 * @head: pointer to the head of local free list
1149 * @tail: pointer to the tail of local free list
1150 * @new: new descriptor that is added to the free list
1151 * @func_name: caller func name
1152 *
1153 * Return: void:
1154 */
1155 static inline
1156 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
1157 union dp_rx_desc_list_elem_t **tail,
1158 struct dp_rx_desc *new, const char *func_name)
1159 {
1160 qdf_assert(head && new);
1161
1162 dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
1163
1164 new->nbuf = NULL;
1165 new->in_use = 0;
1166
1167 ((union dp_rx_desc_list_elem_t *)new)->next = *head;
1168 *head = (union dp_rx_desc_list_elem_t *)new;
1169 /* reset tail if head->next is NULL */
1170 if (!*tail || !(*head)->next)
1171 *tail = *head;
1172 }
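/*
 * Illustrative sketch of building a local free list with the helper
 * above and returning it to the pool (variable names illustrative,
 * loop elided; real callers walk the reaped ring entries):
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
 *					 pool_id, rx_desc_pool);
 */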
1173
1174 /**
1175 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
1176 * @soc: DP SOC handle
1177 * @nbuf: network buffer
1178 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1179 * pool_id have the same mapping)
1180 *
1181 * Return: integer type
1182 */
1183 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1184 uint8_t mac_id);
1185
1186 /**
1187 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
1188 * @soc: DP SOC handle
1189 * @mpdu: mpdu for which peer is invalid
1190 * @mpdu_done: if an mpdu is completed
1191 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1192 * pool_id have the same mapping)
1193 *
1194 * Return: integer type
1195 */
1196 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1197 qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
1198
1199 #define DP_RX_HEAD_APPEND(head, elem) \
1200 do { \
1201 qdf_nbuf_set_next((elem), (head)); \
1202 (head) = (elem); \
1203 } while (0)
1204
1205
1206 #define DP_RX_LIST_APPEND(head, tail, elem) \
1207 do { \
1208 if (!(head)) { \
1209 (head) = (elem); \
1210 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
1211 } else { \
1212 qdf_nbuf_set_next((tail), (elem)); \
1213 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++; \
1214 } \
1215 (tail) = (elem); \
1216 qdf_nbuf_set_next((tail), NULL); \
1217 } while (0)
1218
1219 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
1220 do { \
1221 if (!(phead)) { \
1222 (phead) = (chead); \
1223 } else { \
1224 qdf_nbuf_set_next((ptail), (chead)); \
1225 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
1226 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead); \
1227 } \
1228 (ptail) = (ctail); \
1229 qdf_nbuf_set_next((ptail), NULL); \
1230 } while (0)
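/*
 * Illustrative sketch of the nbuf list macros above (variable names are
 * hypothetical): accumulate MSDUs into a local list and later merge it
 * into the final delivery list.
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf);
 *	DP_RX_MERGE_TWO_LIST(deliver_head, deliver_tail, head, tail);
 */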
1231
1232 #if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM)
1233 /*
1234 * on some third-party platform, the memory below 0x2000
1235 * is reserved for target use, so any memory allocated in this
1236 * region should not be used by host
1237 */
1238 #define MAX_RETRY 50
1239 #define DP_PHY_ADDR_RESERVED 0x2000
1240 #elif defined(BUILD_X86)
1241 /*
1242 * in M2M emulation platforms (x86) the memory below 0x50000000
1243 * is reserved for target use, so any memory allocated in this
1244 * region should not be used by host
1245 */
1246 #define MAX_RETRY 100
1247 #define DP_PHY_ADDR_RESERVED 0x50000000
1248 #endif
1249
1250 #if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM) || defined(BUILD_X86)
1251 /**
1252 * dp_check_paddr() - check if current phy address is valid or not
1253 * @dp_soc: core txrx main context
1254 * @rx_netbuf: skb buffer
1255 * @paddr: physical address
1256 * @rx_desc_pool: struct of rx descriptor pool
1257 * Check if the physical address of nbuf->data is less
1258 * than DP_PHY_ADDR_RESERVED; if so, free the nbuf and try
1259 * allocating a new nbuf, retrying up to MAX_RETRY times.
1260 *
1261 * This is a temp WAR till we fix it properly.
1262 *
1263 * Return: success or failure.
1264 */
1265 static inline
1266 int dp_check_paddr(struct dp_soc *dp_soc,
1267 qdf_nbuf_t *rx_netbuf,
1268 qdf_dma_addr_t *paddr,
1269 struct rx_desc_pool *rx_desc_pool)
1270 {
1271 uint32_t nbuf_retry = 0;
1272 int32_t ret;
1273
1274 if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1275 return QDF_STATUS_SUCCESS;
1276
1277 do {
1278 dp_debug("invalid phy addr 0x%llx, trying again",
1279 (uint64_t)(*paddr));
1280 nbuf_retry++;
1281 if ((*rx_netbuf)) {
1282 /* Not freeing the buffer intentionally.
1283 * It was observed that the same buffer gets
1284 * re-allocated, resulting in longer load time and
1285 * WMI init timeout.
1286 * This buffer is anyway not useful, so skip it.
1287 * Add such buffers to the invalid list and free
1288 * them at driver unload.
1289 **/
1290 qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1291 *rx_netbuf,
1292 QDF_DMA_FROM_DEVICE,
1293 rx_desc_pool->buf_size);
1294 qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1295 *rx_netbuf);
1296 }
1297
1298 *rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
1299 rx_desc_pool->buf_size,
1300 RX_BUFFER_RESERVATION,
1301 rx_desc_pool->buf_alignment,
1302 FALSE);
1303
1304 if (qdf_unlikely(!(*rx_netbuf)))
1305 return QDF_STATUS_E_FAILURE;
1306
1307 ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
1308 *rx_netbuf,
1309 QDF_DMA_FROM_DEVICE,
1310 rx_desc_pool->buf_size);
1311
1312 if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1313 qdf_nbuf_free(*rx_netbuf);
1314 *rx_netbuf = NULL;
1315 continue;
1316 }
1317
1318 *paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
1319
1320 if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1321 return QDF_STATUS_SUCCESS;
1322
1323 } while (nbuf_retry < MAX_RETRY);
1324
1325 if ((*rx_netbuf)) {
1326 qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1327 *rx_netbuf,
1328 QDF_DMA_FROM_DEVICE,
1329 rx_desc_pool->buf_size);
1330 qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1331 *rx_netbuf);
1332 }
1333
1334 return QDF_STATUS_E_FAILURE;
1335 }
1336
1337 #else
1338 static inline
1339 int dp_check_paddr(struct dp_soc *dp_soc,
1340 qdf_nbuf_t *rx_netbuf,
1341 qdf_dma_addr_t *paddr,
1342 struct rx_desc_pool *rx_desc_pool)
1343 {
1344 return QDF_STATUS_SUCCESS;
1345 }
1346
1347 #endif
1348
1349 /**
1350 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
1351 * the MSDU Link Descriptor
1352 * @soc: core txrx main context
1353 * @buf_info: buf_info includes cookie that is used to lookup
1354 * virtual address of link descriptor after deriving the page id
1355 * and the offset or index of the desc on the associated page.
1356 *
1357 * This is the VA of the link descriptor, that HAL layer later uses to
1358 * retrieve the list of MSDU's for a given MPDU.
1359 *
1360 * Return: void *: Virtual Address of the Rx descriptor
1361 */
1362 static inline
1363 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
1364 struct hal_buf_info *buf_info)
1365 {
1366 void *link_desc_va;
1367 struct qdf_mem_multi_page_t *pages;
1368 uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
1369
1370 pages = &soc->link_desc_pages;
1371 if (!pages)
1372 return NULL;
1373 if (qdf_unlikely(page_id >= pages->num_pages))
1374 return NULL;
1375 link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
1376 (buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
1377 return link_desc_va;
1378 }
1379
1380 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1381 #ifdef DISABLE_EAPOL_INTRABSS_FWD
1382 #ifdef WLAN_FEATURE_11BE_MLO
1383 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1384 qdf_nbuf_t nbuf)
1385 {
1386 struct qdf_mac_addr *self_mld_mac_addr =
1387 (struct qdf_mac_addr *)vdev->mld_mac_addr.raw;
1388 return qdf_is_macaddr_equal(self_mld_mac_addr,
1389 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1390 QDF_NBUF_DEST_MAC_OFFSET);
1391 }
1392 #else
1393 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1394 qdf_nbuf_t nbuf)
1395 {
1396 return false;
1397 }
1398 #endif
1399
1400 static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
1401 qdf_nbuf_t nbuf)
1402 {
1403 return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
1404 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1405 QDF_NBUF_DEST_MAC_OFFSET);
1406 }
1407
1408 /**
1409 * dp_rx_intrabss_eapol_drop_check() - check and drop an EAPOL
1410 * pkt whose DA is not equal to the vdev mac addr, since fwd is not allowed.
1411 * @soc: core txrx main context
1412 * @ta_txrx_peer: source peer entry
1413 * @rx_tlv_hdr: start address of rx tlvs
1414 * @nbuf: nbuf that has to be intrabss forwarded
1415 *
1416 * Return: true if the EAPOL pkt is dropped (nbuf freed), else false
1417 */
1418 static inline
1419 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1420 struct dp_txrx_peer *ta_txrx_peer,
1421 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1422 {
1423 if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
1424 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
1425 nbuf) ||
1426 dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
1427 nbuf)))) {
1428 qdf_nbuf_free(nbuf);
1429 DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
1430 return true;
1431 }
1432
1433 return false;
1434 }
1435 #else /* DISABLE_EAPOL_INTRABSS_FWD */
1436
1437 static inline
1438 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1439 struct dp_txrx_peer *ta_txrx_peer,
1440 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1441 {
1442 return false;
1443 }
1444 #endif /* DISABLE_EAPOL_INTRABSS_FWD */
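/*
 * Illustrative intra-BSS forwarding sketch for the check above (the
 * next_msdu label is hypothetical; when the helper returns true the
 * nbuf has already been freed and must not be touched again):
 *
 *	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer,
 *					    rx_tlv_hdr, nbuf))
 *		goto next_msdu;
 */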
1445
1446 /**
1447 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
1448 * @soc: core txrx main context
1449 * @ta_peer: source peer entry
1450 * @rx_tlv_hdr: start address of rx tlvs
1451 * @nbuf: nbuf that has to be intrabss forwarded
1452 * @tid_stats: tid stats pointer
1453 * @link_id: link Id on which packet is received
1454 *
1455 * Return: bool: true if it is forwarded else false
1456 */
1457 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
1458 struct dp_txrx_peer *ta_peer,
1459 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1460 struct cdp_tid_rx_stats *tid_stats,
1461 uint8_t link_id);
1462
1463 /**
1464 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
1465 * @soc: core txrx main context
1466 * @ta_peer: source peer entry
1467 * @tx_vdev_id: VDEV ID for Intra-BSS TX
1468 * @rx_tlv_hdr: start address of rx tlvs
1469 * @nbuf: nbuf that has to be intrabss forwarded
1470 * @tid_stats: tid stats pointer
1471 * @link_id: link Id on which packet is received
1472 *
1473 * Return: bool: true if it is forwarded else false
1474 */
1475 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
1476 struct dp_txrx_peer *ta_peer,
1477 uint8_t tx_vdev_id,
1478 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1479 struct cdp_tid_rx_stats *tid_stats,
1480 uint8_t link_id);
1481
1482 /**
1483 * dp_rx_defrag_concat() - Concatenate the fragments
1484 *
1485 * @dst: destination pointer to the buffer
1486 * @src: source pointer from where the fragment payload is to be copied
1487 *
1488 * Return: QDF_STATUS
1489 */
1490 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
1491 {
1492 /*
1493 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
1494 * to provide space for src, the headroom portion is copied from
1495 * the original dst buffer to the larger new dst buffer.
1496 * (This is needed, because the headroom of the dst buffer
1497 * contains the rx desc.)
1498 */
1499 if (!qdf_nbuf_cat(dst, src)) {
1500 /*
1501 * qdf_nbuf_cat does not free the src memory.
1502 * Free src nbuf before returning
1503 * For the failure case, the caller takes care of freeing the nbuf
1504 */
1505 qdf_nbuf_free(src);
1506 return QDF_STATUS_SUCCESS;
1507 }
1508
1509 return QDF_STATUS_E_DEFRAG_ERROR;
1510 }
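/*
 * Illustrative sketch for the concat helper above (head_nbuf and
 * frag_nbuf are hypothetical fragment variables): on success the src
 * nbuf has been freed by the helper, on failure the caller still owns
 * and frees it.
 *
 *	if (dp_rx_defrag_concat(head_nbuf, frag_nbuf) !=
 *	    QDF_STATUS_SUCCESS) {
 *		qdf_nbuf_free(frag_nbuf);
 *		return QDF_STATUS_E_DEFRAG_ERROR;
 *	}
 */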
1511
1512 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1513
1514 #ifndef FEATURE_WDS
1515 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
1516 struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);
1517
1518 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
1519 {
1520 return QDF_STATUS_SUCCESS;
1521 }
1522
1523 static inline void
1524 dp_rx_wds_srcport_learn(struct dp_soc *soc,
1525 uint8_t *rx_tlv_hdr,
1526 struct dp_txrx_peer *txrx_peer,
1527 qdf_nbuf_t nbuf,
1528 struct hal_rx_msdu_metadata msdu_metadata)
1529 {
1530 }
1531
1532 static inline void
1533 dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
1534 struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
1535 struct hal_rx_msdu_metadata msdu_end_info,
1536 bool ad4_valid, bool chfrag_start)
1537 {
1538 }
1539 #endif
1540
1541 /**
1542 * dp_rx_desc_dump() - dump the sw rx descriptor
1543 *
1544 * @rx_desc: sw rx descriptor
1545 */
1546 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
1547 {
1548 dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
1549 rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
1550 rx_desc->in_use, rx_desc->unmapped);
1551 }
1552
1553 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1554
1555 /**
1556 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
1557 * In qwrap mode, packets originating from
1558 * any vdev should not loop back and
1559 * should be dropped.
1560 * @vdev: vdev on which rx packet is received
1561 * @nbuf: rx pkt
1562 *
1563 */
1564 #if ATH_SUPPORT_WRAP
1565 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1566 qdf_nbuf_t nbuf)
1567 {
1568 struct dp_vdev *psta_vdev;
1569 struct dp_pdev *pdev = vdev->pdev;
1570 uint8_t *data = qdf_nbuf_data(nbuf);
1571
1572 if (qdf_unlikely(vdev->proxysta_vdev)) {
1573 /* In qwrap isolation mode, allow loopback packets as all
1574 * packets go to RootAP and Loopback on the mpsta.
1575 */
1576 if (vdev->isolation_vdev)
1577 return false;
1578 TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
1579 if (qdf_unlikely(psta_vdev->proxysta_vdev &&
1580 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
1581 &data[QDF_MAC_ADDR_SIZE],
1582 QDF_MAC_ADDR_SIZE))) {
1583 /* Drop packet if source address is equal to
1584 * any of the vdev addresses.
1585 */
1586 return true;
1587 }
1588 }
1589 }
1590 return false;
1591 }
1592 #else
1593 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1594 qdf_nbuf_t nbuf)
1595 {
1596 return false;
1597 }
1598 #endif
1599
1600 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1601
1602 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1603 defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
1604 defined(WLAN_SUPPORT_RX_FLOW_TAG)
1605 #include "dp_rx_tag.h"
1606 #endif
1607
1608 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
1609 !defined(WLAN_SUPPORT_RX_FLOW_TAG)
1610 /**
1611 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
1612 * and set the corresponding tag in QDF packet
1613 * @soc: core txrx main context
1614 * @vdev: vdev on which the packet is received
1615 * @nbuf: QDF pkt buffer on which the protocol tag should be set
1616 * @rx_tlv_hdr: base address where the RX TLVs start
1617 * @ring_index: REO ring number, not used for error & monitor ring
1618 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
1619 * @is_update_stats: flag to indicate whether to update stats or not
1620 *
1621 * Return: void
1622 */
1623 static inline void
1624 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1625 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1626 uint16_t ring_index,
1627 bool is_reo_exception, bool is_update_stats)
1628 {
1629 }
1630 #endif
1631
1632 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1633 /**
1634 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
1635 * and returns whether cce metadata matches
1636 * @soc: core txrx main context
1637 * @vdev: vdev on which the packet is received
1638 * @nbuf: QDF pkt buffer on which the protocol tag should be set
1639 * @rx_tlv_hdr: base address where the RX TLVs start
1640 *
1641 * Return: bool
1642 */
1643 static inline bool
1644 dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
1645 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
1646 {
1647 return false;
1648 }
1649
1650 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1651
1652 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
1653 /**
1654 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
1655 * and set the corresponding tag in QDF packet
1656 * @soc: core txrx main context
1657 * @vdev: vdev on which the packet is received
1658 * @nbuf: QDF pkt buffer on which the protocol tag should be set
1659 * @rx_tlv_hdr: base address where the RX TLVs start
1660 * @update_stats: flag to indicate whether to update stats or not
1661 *
1662 * Return: void
1663 */
1664 static inline void
1665 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1666 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
1667 {
1668 }
1669 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
1670
1671 #define CRITICAL_BUFFER_THRESHOLD 64
1672 /**
1673 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
1674 * called during dp rx initialization
1675 * and at the end of dp_rx_process.
1676 *
1677 * @dp_soc: core txrx main context
1678 * @mac_id: mac_id which is one of 3 mac_ids
1679 * @dp_rxdma_srng: dp rxdma circular ring
1680 * @rx_desc_pool: Pointer to free Rx descriptor pool
1681 * @num_req_buffers: number of buffer to be replenished
1682 * @desc_list: list of descs if called from dp_rx_process
1683 * or NULL during dp rx initialization or out of buffer
1684 * interrupt.
1685 * @tail: tail of descs list
1686 * @req_only: If true don't replenish more than req buffers
1687 * @force_replenish: replenish the full ring without limit check;
1688 * this field will be considered only when desc_list
1689 * is NULL and req_only is false
1690 * @func_name: name of the caller function
1691 *
1692 * Return: return success or failure
1693 */
1694 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1695 struct dp_srng *dp_rxdma_srng,
1696 struct rx_desc_pool *rx_desc_pool,
1697 uint32_t num_req_buffers,
1698 union dp_rx_desc_list_elem_t **desc_list,
1699 union dp_rx_desc_list_elem_t **tail,
1700 bool req_only,
1701 bool force_replenish,
1702 const char *func_name);
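/*
 * Illustrative usage sketch (not part of the driver API): a hypothetical
 * caller that has reaped 'num_reaped' buffers hands its local descriptor
 * free list back so the ring is refilled. The local variables shown here
 * are assumptions for the example only.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	// ... reap loop collects freed descriptors into head/tail ...
 *
 *	if (num_reaped)
 *		__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng,
 *					  rx_desc_pool, num_reaped,
 *					  &head, &tail, false, false,
 *					  __func__);
 */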
1703
1704 /**
1705 * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs
1706 * use direct APIs to invalidate
1707 * and get the physical address of the
1708 * nbuf instead of the map api, called during
1709 * dp rx initialization and at the end
1710 * of dp_rx_process.
1711 *
1712 * @dp_soc: core txrx main context
1713 * @mac_id: mac_id which is one of 3 mac_ids
1714 * @dp_rxdma_srng: dp rxdma circular ring
1715 * @rx_desc_pool: Pointer to free Rx descriptor pool
1716 * @num_req_buffers: number of buffer to be replenished
1717 * @desc_list: list of descs if called from dp_rx_process
1718 * or NULL during dp rx initialization or out of buffer
1719 * interrupt.
1720 * @tail: tail of descs list
1721 *
1722 * Return: return success or failure
1723 */
1724 QDF_STATUS
1725 __dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1726 struct dp_srng *dp_rxdma_srng,
1727 struct rx_desc_pool *rx_desc_pool,
1728 uint32_t num_req_buffers,
1729 union dp_rx_desc_list_elem_t **desc_list,
1730 union dp_rx_desc_list_elem_t **tail);
1731
1732 /**
1733 * __dp_rx_comp2refill_replenish() - replenish rxdma ring with rx nbufs
1734 * use direct APIs to invalidate
1735 * and get the physical address of the
1736 * nbuf instead of the map api, called during
1737 * dp rx initialization and at the end
1738 * of dp_rx_process.
1739 *
1740 * @dp_soc: core txrx main context
1741 * @mac_id: mac_id which is one of 3 mac_ids
1742 * @dp_rxdma_srng: dp rxdma circular ring
1743 * @rx_desc_pool: Pointer to free Rx descriptor pool
1744 * @num_req_buffers: number of buffer to be replenished
1745 * @desc_list: list of descs if called from dp_rx_process
1746 * or NULL during dp rx initialization or out of buffer
1747 * interrupt.
1748 * @tail: tail of descs list
1749 * Return: return success or failure
1750 */
1751 QDF_STATUS
1752 __dp_rx_comp2refill_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1753 struct dp_srng *dp_rxdma_srng,
1754 struct rx_desc_pool *rx_desc_pool,
1755 uint32_t num_req_buffers,
1756 union dp_rx_desc_list_elem_t **desc_list,
1757 union dp_rx_desc_list_elem_t **tail);
1758
1759 /**
1760 * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs
1761 * use direct APIs to invalidate
1762 * and get the physical address of the
1763 * nbuf instead of the map api, called when
1764 * low threshold interrupt is triggered
1765 *
1766 * @dp_soc: core txrx main context
1767 * @mac_id: mac_id which is one of 3 mac_ids
1768 * @dp_rxdma_srng: dp rxdma circular ring
1769 * @rx_desc_pool: Pointer to free Rx descriptor pool
1770 * @force_replenish: Force replenish the ring fully
1771 *
1772 * Return: return success or failure
1773 */
1774 QDF_STATUS
1775 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1776 struct dp_srng *dp_rxdma_srng,
1777 struct rx_desc_pool *rx_desc_pool,
1778 bool force_replenish);
1779
1780 /**
1781 * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs
1782 * use direct APIs to invalidate
1783 * and get the physical address of the
1784 * nbuf instead of the map api, called during
1785 * dp rx initialization.
1786 *
1787 * @dp_soc: core txrx main context
1788 * @mac_id: mac_id which is one of 3 mac_ids
1789 * @dp_rxdma_srng: dp rxdma circular ring
1790 * @rx_desc_pool: Pointer to free Rx descriptor pool
1791 * @num_req_buffers: number of buffer to be replenished
1792 *
1793 * Return: return success or failure
1794 */
1795 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
1796 uint32_t mac_id,
1797 struct dp_srng *dp_rxdma_srng,
1798 struct rx_desc_pool *rx_desc_pool,
1799 uint32_t num_req_buffers);
1800
1801 /**
1802 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
1803 * called during dp rx initialization
1804 *
1805 * @dp_soc: core txrx main context
1806 * @mac_id: mac_id which is one of 3 mac_ids
1807 * @dp_rxdma_srng: dp rxdma circular ring
1808 * @rx_desc_pool: Pointer to free Rx descriptor pool
1809 * @num_req_buffers: number of buffer to be replenished
1810 *
1811 * Return: return success or failure
1812 */
1813 QDF_STATUS
1814 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
1815 struct dp_srng *dp_rxdma_srng,
1816 struct rx_desc_pool *rx_desc_pool,
1817 uint32_t num_req_buffers);
1818
1819 /**
1820 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
1821 * @vdev: DP Virtual device handle
1822 * @nbuf: Buffer pointer
1823 * @rx_tlv_hdr: start of rx tlv header
1824 * @txrx_peer: pointer to peer
1825 *
1826 * This function allocates memory for mesh receive stats and fills the
1827 * required stats. Stores the memory address in skb cb.
1828 *
1829 * Return: void
1830 */
1831 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1832 uint8_t *rx_tlv_hdr,
1833 struct dp_txrx_peer *txrx_peer);
1834
1835 /**
1836 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
1837 * @vdev: DP Virtual device handle
1838 * @nbuf: Buffer pointer
1839 * @rx_tlv_hdr: start of rx tlv header
1840 *
1841 * This checks if the received packet matches any filter-out
1842 * category and drops the packet if it does.
1843 *
1844 * Return: QDF_STATUS_SUCCESS indicates drop,
1845 * QDF_STATUS_E_FAILURE indicates not to drop
1846 */
1847 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1848 uint8_t *rx_tlv_hdr);
1849
1850 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
1851 struct dp_txrx_peer *peer);
1852
1853 /**
1854 * dp_rx_compute_delay() - Compute and fill in all timestamps
1855 * to pass in correct fields
1856 * @vdev: vdev handle
1857 * @nbuf: network buffer
1858 *
1859 * Return: none
1860 */
1861 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1862
1863 #ifdef QCA_PEER_EXT_STATS
1864
1865 /**
1866 * dp_rx_compute_tid_delay - Compute per TID delay stats
1867 * @stats: TID delay stats to update
1868 * @nbuf: NBuffer
1869 *
1870 * Return: Void
1871 */
1872 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1873 qdf_nbuf_t nbuf);
1874 #endif /* QCA_PEER_EXT_STATS */
1875
1876 #ifdef WLAN_SUPPORT_PPEDS
1877 static inline
1878 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1879 {
1880 rx_desc->reuse_nbuf = nbuf;
1881 rx_desc->has_reuse_nbuf = true;
1882 }
1883
1884 /**
1885 * __dp_rx_add_to_free_desc_list_reuse() - Adds to a local free descriptor list
1886 * that will be reused
1887 *
1888 * @head: pointer to the head of local free list
1889 * @tail: pointer to the tail of local free list
1890 * @new: new descriptor that is added to the free list
1891 * @func_name: caller func name
1892 *
1893 * Return: void
1894 */
1895 static inline
1896 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
1897 union dp_rx_desc_list_elem_t **tail,
1898 struct dp_rx_desc *new,
1899 const char *func_name)
1900 {
1901 qdf_assert(head && new);
1902
1903 dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
1904
1905 new->nbuf = NULL;
1906
1907 ((union dp_rx_desc_list_elem_t *)new)->next = *head;
1908 *head = (union dp_rx_desc_list_elem_t *)new;
1909 /* reset tail if head->next is NULL */
1910 if (!*tail || !(*head)->next)
1911 *tail = *head;
1912 }
1913 #else
1914 static inline
1915 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1916 {
1917 }
1918
1919 static inline
1920 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
1921 union dp_rx_desc_list_elem_t **tail,
1922 struct dp_rx_desc *new,
1923 const char *func_name)
1924 {
1925 }
1926 #endif
1927
1928 #ifdef RX_DESC_DEBUG_CHECK
1929 /**
1930 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
1931 * @rx_desc: rx descriptor pointer
1932 *
1933 * Return: true, if magic is correct, else false.
1934 */
1935 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1936 {
1937 if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
1938 return false;
1939
1940 rx_desc->magic = 0;
1941 return true;
1942 }
1943
1944 /**
1945 * dp_rx_desc_prep() - prepare rx desc
1946 * @rx_desc: rx descriptor pointer to be prepared
1947 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1948 *
1949 * Note: assumption is that we are associating a nbuf which is mapped
1950 *
1951 * Return: none
1952 */
1953 static inline
1954 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1955 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1956 {
1957 rx_desc->magic = DP_RX_DESC_MAGIC;
1958 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1959 rx_desc->unmapped = 0;
1960 rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
1961 dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
1962 rx_desc->prev_paddr_buf_start = rx_desc->paddr_buf_start;
1963 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1964 }
1965
1966 /**
1967 * dp_rx_desc_frag_prep() - prepare rx desc
1968 * @rx_desc: rx descriptor pointer to be prepared
1969 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1970 *
1971 * Note: assumption is that the frag address is mapped
1972 *
1973 * Return: none
1974 */
1975 #ifdef DP_RX_MON_MEM_FRAG
1976 static inline
1977 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1978 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1979 {
1980 rx_desc->magic = DP_RX_DESC_MAGIC;
1981 rx_desc->rx_buf_start =
1982 (uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1983 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1984 rx_desc->unmapped = 0;
1985 }
1986 #else
1987 static inline
1988 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1989 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1990 {
1991 }
1992 #endif /* DP_RX_MON_MEM_FRAG */
1993
1994 /**
1995 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
1996 * @rx_desc: rx descriptor
1997 * @ring_paddr: paddr obtained from the ring
1998 *
1999 * Return: QDF_STATUS
2000 */
2001 static inline
2002 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
2003 uint64_t ring_paddr)
2004 {
2005 return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
2006 }
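/*
 * Illustrative sketch (example only): a reap path can cross-check the
 * buffer address reported by the HW ring descriptor against the address
 * cached in the SW rx_desc before trusting the nbuf. 'ring_paddr' is
 * assumed to have been read from the ring descriptor by the caller.
 *
 *	if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc,
 *							ring_paddr))) {
 *		// paddr mismatch: treat the entry as corrupted and skip it
 *		continue;
 *	}
 */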
2007 #else
2008
2009 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
2010 {
2011 return true;
2012 }
2013
2014 static inline
2015 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
2016 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2017 {
2018 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
2019 dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
2020 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
2021 rx_desc->unmapped = 0;
2022 }
2023
2024 #ifdef DP_RX_MON_MEM_FRAG
2025 static inline
2026 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2027 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2028 {
2029 rx_desc->rx_buf_start =
2030 (uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
2031 rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
2032 rx_desc->unmapped = 0;
2033 }
2034 #else
2035 static inline
2036 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2037 struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2038 {
2039 }
2040 #endif /* DP_RX_MON_MEM_FRAG */
2041
2042 static inline
2043 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
2044 uint64_t ring_paddr)
2045 {
2046 return true;
2047 }
2048 #endif /* RX_DESC_DEBUG_CHECK */
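/*
 * Illustrative sketch (example only) of how a replenish path ties a freshly
 * mapped nbuf to a SW rx descriptor via dp_rx_desc_prep(). The way the
 * frag info is populated below is an assumption for the example;
 * qdf_nbuf_get_frag_paddr() is used only to obtain the mapped physical
 * address of the buffer.
 *
 *	struct dp_rx_nbuf_frag_info frag_info = {0};
 *
 *	frag_info.virt_addr.nbuf = nbuf;
 *	frag_info.paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *	dp_rx_desc_prep(rx_desc, &frag_info);
 */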
2049
2050 /**
2051 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
2052 * monitor destination ring via frag.
2053 * @rx_desc_pool: Rx desc pool
2054 * @is_mon_dest_desc: Is it for monitor dest buffer
2055 *
2056 * Enable this flag only for monitor destination buffer processing
2057 * if DP_RX_MON_MEM_FRAG feature is enabled.
2058 * If flag is set then frag based function will be called for alloc,
2059 * map, prep desc and free ops for desc buffer else normal nbuf based
2060 * function will be called.
2061 *
2062 * Return: None
2063 */
2064 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
2065 bool is_mon_dest_desc);
2066
2067 #ifndef QCA_MULTIPASS_SUPPORT
2068 static inline
2069 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
2070 uint8_t tid)
2071 {
2072 return false;
2073 }
2074 #else
2075 /**
2076 * dp_rx_multipass_process - insert vlan tag on frames for traffic separation
2077 * @txrx_peer: DP txrx peer handle
2078 * @nbuf: skb
2079 * @tid: traffic priority
2080 *
2081 * Return: bool: true in case of success else false
2082 * Success is considered if:
2083 * i. The frame has a vlan header
2084 * ii. The frame comes from a different peer and doesn't need multipass processing
2085 * Failure is considered if:
2086 * i. Frame comes from multipass peer but doesn't contain vlan header.
2087 * In failure case, drop such frames.
2088 */
2089 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
2090 uint8_t tid);
2091 #endif
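/*
 * Illustrative sketch (example only): in a hypothetical receive path the
 * multipass check is consulted before delivery and the frame is dropped
 * when the check fails. Gating on 'vdev->multipass_en' is an assumption
 * of this example.
 *
 *	if (qdf_unlikely(vdev->multipass_en) &&
 *	    !dp_rx_multipass_process(txrx_peer, nbuf, tid)) {
 *		dp_rx_nbuf_free(nbuf);	// drop frames failing the vlan check
 *		continue;
 *	}
 */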
2092
2093 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2094
2095 #ifndef WLAN_RX_PKT_CAPTURE_ENH
2096 static inline
2097 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
2098 struct dp_peer *peer_handle,
2099 bool value, uint8_t *mac_addr)
2100 {
2101 return QDF_STATUS_SUCCESS;
2102 }
2103 #endif
2104
2105 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2106
2107 /**
2108 * dp_rx_deliver_to_stack() - deliver pkts to network stack
2109 * Caller to hold peer refcount and check for valid peer
2110 * @soc: soc
2111 * @vdev: vdev
2112 * @peer: txrx peer
2113 * @nbuf_head: skb list head
2114 * @nbuf_tail: skb list tail
2115 *
2116 * Return: QDF_STATUS
2117 */
2118 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
2119 struct dp_vdev *vdev,
2120 struct dp_txrx_peer *peer,
2121 qdf_nbuf_t nbuf_head,
2122 qdf_nbuf_t nbuf_tail);
2123
2124 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
2125 /**
2126 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
2127 * caller to hold peer refcount and check for valid peer
2128 * @soc: soc
2129 * @vdev: vdev
2130 * @peer: peer
2131 * @nbuf_head: skb list head
2132 * @nbuf_tail: skb list tail
2133 *
2134 * Return: QDF_STATUS
2135 */
2136 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
2137 struct dp_vdev *vdev,
2138 struct dp_txrx_peer *peer,
2139 qdf_nbuf_t nbuf_head,
2140 qdf_nbuf_t nbuf_tail);
2141 #endif
2142
2143 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2144
2145 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
2146 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2147 do { \
2148 if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
2149 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf); \
2150 break; \
2151 } \
2152 DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf); \
2153 if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) { \
2154 if (!dp_rx_buffer_pool_refill(soc, ebuf_head, \
2155 rx_desc->pool_id)) \
2156 DP_RX_MERGE_TWO_LIST(head, tail, \
2157 ebuf_head, ebuf_tail);\
2158 ebuf_head = NULL; \
2159 ebuf_tail = NULL; \
2160 } \
2161 } while (0)
2162 #else
2163 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2164 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
2165 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
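/*
 * Illustrative sketch (example only): inside a hypothetical reap loop the
 * macro appends the reaped nbuf either to the delivery list or, when the
 * preallocated buffer pool is active, to the emergency-buffer list that may
 * be refilled back into the pool. All list variables and the loop shape are
 * assumptions of the example.
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *	qdf_nbuf_t ebuf_head = NULL, ebuf_tail = NULL;
 *
 *	while (more_ring_entries) {	// hypothetical loop condition
 *		// ... resolve rx_desc from the reaped ring entry ...
 *		DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail,
 *				   rx_desc);
 *	}
 */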
2166
2167 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2168
2169 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2170 /**
2171 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
2172 * @soc : dp_soc handle
2173 * @pdev: dp_pdev handle
2174 * @peer_id: peer_id of the peer for which completion came
2175 * @is_offload: flag to indicate if the packet is delivered via the offload path
2176 * @netbuf: Buffer pointer
2177 *
2178 * This function is used to deliver rx packet to packet capture
2179 */
2180 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
2181 uint16_t peer_id, uint32_t is_offload,
2182 qdf_nbuf_t netbuf);
2183 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2184 uint32_t is_offload);
2185 #else
2186 static inline void
2187 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
2188 uint16_t peer_id, uint32_t is_offload,
2189 qdf_nbuf_t netbuf)
2190 {
2191 }
2192
2193 static inline void
2194 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2195 uint32_t is_offload)
2196 {
2197 }
2198 #endif
2199
2200 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2201 #ifdef FEATURE_MEC
2202 /**
2203 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
2204 * back on same vap or a different vap.
2205 * @soc: core DP main context
2206 * @peer: dp peer handler
2207 * @rx_tlv_hdr: start of the rx TLV header
2208 * @nbuf: pkt buffer
2209 *
2210 * Return: bool (true if it is a looped back pkt else false)
2211 *
2212 */
2213 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2214 struct dp_txrx_peer *peer,
2215 uint8_t *rx_tlv_hdr,
2216 qdf_nbuf_t nbuf);
2217 #else
2218 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2219 struct dp_txrx_peer *peer,
2220 uint8_t *rx_tlv_hdr,
2221 qdf_nbuf_t nbuf)
2222 {
2223 return false;
2224 }
2225 #endif /* FEATURE_MEC */
2226 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2227
2228 #ifdef RECEIVE_OFFLOAD
2229 /**
2230 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
2231 * @soc: DP SOC handle
2232 * @rx_tlv: RX TLV received for the msdu
2233 * @msdu: msdu for which GRO info needs to be filled
2234 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
2235 *
2236 * Return: None
2237 */
2238 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2239 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
2240 #else
2241 static inline
2242 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2243 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
2244 {
2245 }
2246 #endif
2247
2248 /**
2249 * dp_rx_msdu_stats_update() - update per msdu stats.
2250 * @soc: core txrx main context
2251 * @nbuf: pointer to the first msdu of an amsdu.
2252 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2253 * @txrx_peer: pointer to the txrx peer object.
2254 * @ring_id: reo dest ring number on which pkt is reaped.
2255 * @tid_stats: per tid rx stats.
2256 * @link_id: link Id on which packet is received
2257 *
2258 * update all the per msdu stats for that nbuf.
2259 *
2260 * Return: void
2261 */
2262 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2263 uint8_t *rx_tlv_hdr,
2264 struct dp_txrx_peer *txrx_peer,
2265 uint8_t ring_id,
2266 struct cdp_tid_rx_stats *tid_stats,
2267 uint8_t link_id);
2268
2269 /**
2270 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
2271 * no corresponding peer found
2272 * @soc: core txrx main context
2273 * @nbuf: pkt skb pointer
2274 *
2275 * This function will try to deliver some RX special frames to the stack
2276 * even when no matching peer is found. For instance, in the LFR case, some
2277 * eapol data will be sent to the host before peer_map is done.
2278 *
2279 * Return: None
2280 */
2281 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
2282
2283 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2284 #ifdef DP_RX_DROP_RAW_FRM
2285 /**
2286 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2287 * @nbuf: pkt skb pointer
2288 *
2289 * Return: true - raw frame, dropped
2290 * false - not raw frame, do nothing
2291 */
2292 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
2293 #else
2294 static inline
2295 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2296 {
2297 return false;
2298 }
2299 #endif
2300
2301 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2302 /**
2303 * dp_rx_update_stats() - Update soc level rx packet count
2304 * @soc: DP soc handle
2305 * @nbuf: nbuf received
2306 *
2307 * Return: none
2308 */
2309 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2310 #else
2311 static inline
2312 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
2313 {
2314 }
2315 #endif
2316
2317 /**
2318 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
2319 * @pdev: dp_pdev handle
2320 * @nbuf: pointer to the first msdu of an amsdu.
2321 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2322 *
2323 * The ipsumed field of the skb is set based on whether HW validated the
2324 * IP/TCP/UDP checksum.
2325 *
2326 * Return: void
2327 */
2328 #if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
2329 static inline
2330 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2331 qdf_nbuf_t nbuf,
2332 uint8_t *rx_tlv_hdr)
2333 {
2334 qdf_nbuf_rx_cksum_t cksum = {0};
2335 //TODO - Move this to ring desc api
2336 //HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
2337 //HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
2338 uint32_t ip_csum_err, tcp_udp_csum_er;
2339
2340 hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
2341 &tcp_udp_csum_er);
2342
2343 if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
2344 if (qdf_likely(!ip_csum_err)) {
2345 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
2346 if (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
2347 qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
2348 if (qdf_likely(!tcp_udp_csum_er)) {
2349 cksum.csum_level = 1;
2350 } else {
2351 cksum.l4_result =
2352 QDF_NBUF_RX_CKSUM_NONE;
2353 DP_STATS_INC(pdev,
2354 err.tcp_udp_csum_err, 1);
2355 }
2356 }
2357 } else {
2358 DP_STATS_INC(pdev, err.ip_csum_err, 1);
2359 }
2360 } else if (qdf_nbuf_is_ipv6_udp_pkt(nbuf) ||
2361 qdf_nbuf_is_ipv6_tcp_pkt(nbuf)) {
2362 if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er))
2363 cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
2364 else if (ip_csum_err) {
2365 DP_STATS_INC(pdev, err.ip_csum_err, 1);
2366 } else {
2367 DP_STATS_INC(pdev, err.tcp_udp_csum_err, 1);
2368 }
2369 }
2370
2371 qdf_nbuf_set_rx_cksum(nbuf, &cksum);
2372 }
2373 #else
2374 static inline
2375 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2376 qdf_nbuf_t nbuf,
2377 uint8_t *rx_tlv_hdr)
2378 {
2379 }
2380 #endif
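/*
 * Illustrative call-site sketch (example only): the per-MSDU stats/deliver
 * path is assumed to invoke the helper once per frame so the network stack
 * can skip software checksum validation when HW has already verified it.
 *
 *	dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
 */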
2381 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2382
2383 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
2384 static inline
2385 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2386 int max_reap_limit)
2387 {
2388 bool limit_hit = false;
2389
2390 limit_hit =
2391 (num_reaped >= max_reap_limit) ? true : false;
2392
2393 if (limit_hit)
2394 DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
2395
2396 return limit_hit;
2397 }
2398
2399 static inline
2400 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2401 {
2402 return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
2403 }
2404
2405 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2406 {
2407 struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
2408
2409 return cfg->rx_reap_loop_pkt_limit;
2410 }
2411 #else
2412 static inline
2413 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2414 int max_reap_limit)
2415 {
2416 return false;
2417 }
2418
2419 static inline
2420 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2421 {
2422 return false;
2423 }
2424
2425 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2426 {
2427 return 0;
2428 }
2429 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
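/*
 * Illustrative sketch (example only) of how the reap-limit helpers combine
 * in a hypothetical processing loop: the loop budget comes from
 * dp_rx_get_loop_pkt_limit() and the loop breaks out once
 * dp_rx_reap_loop_pkt_limit_hit() reports the budget as exhausted.
 * 'more_ring_entries' is an assumed loop condition.
 *
 *	int num_reaped = 0;
 *	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
 *
 *	while (more_ring_entries) {
 *		// ... process one ring entry ...
 *		num_reaped++;
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_reaped,
 *						  max_reap_limit))
 *			break;
 *	}
 */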
2430
2431 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2432
2433 static inline uint16_t
2434 dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
2435 {
2436 return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
2437 peer_metadata);
2438 }
2439
2440 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
2441 /**
2442 * dp_rx_nbuf_set_link_id_from_tlv() - Set link id in nbuf cb
2443 * @soc: SOC handle
2444 * @tlv_hdr: rx tlv header
2445 * @nbuf: nbuf pointer
2446 *
2447 * Return: None
2448 */
2449 static inline void
2450 dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr,
2451 qdf_nbuf_t nbuf)
2452 {
2453 uint32_t peer_metadata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2454 tlv_hdr);
2455
2456 if (soc->arch_ops.dp_rx_peer_set_link_id)
2457 soc->arch_ops.dp_rx_peer_set_link_id(nbuf, peer_metadata);
2458 }
2459
2460 /**
2461 * dp_rx_set_nbuf_band() - Set band info in nbuf cb
2462 * @nbuf: nbuf pointer
2463 * @txrx_peer: txrx_peer pointer
2464 * @link_id: Peer Link ID
2465 *
2466 * Return: None
2467 */
2468 static inline void
2469 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
2470 uint8_t link_id)
2471 {
2472 qdf_nbuf_rx_set_band(nbuf, txrx_peer->band[link_id]);
2473 }
2474 #else
2475 static inline void
2476 dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr,
2477 qdf_nbuf_t nbuf)
2478 {
2479 }
2480
2481 static inline void
2482 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
2483 uint8_t link_id)
2484 {
2485 }
2486 #endif
2487
2488 /**
2489 * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
2490 * @soc: SOC handle
2491 * @rx_desc_pool: pointer to RX descriptor pool
2492 * @pool_id: pool ID
2493 *
2494 * Return: None
2495 */
2496 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
2497 struct rx_desc_pool *rx_desc_pool,
2498 uint32_t pool_id);
2499
2500 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
2501 struct rx_desc_pool *rx_desc_pool,
2502 uint32_t pool_id);
2503
2504 /**
2505 * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
2506 *
2507 * Return: True if any rx pkt tracepoint is enabled else false
2508 */
2509 static inline
2510 bool dp_rx_pkt_tracepoints_enabled(void)
2511 {
2512 return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
2513 qdf_trace_dp_rx_udp_pkt_enabled() ||
2514 qdf_trace_dp_rx_pkt_enabled());
2515 }
2516
2517 #ifdef FEATURE_DIRECT_LINK
2518 /**
2519 * dp_audio_smmu_map()- Map memory region into Audio SMMU CB
2520 * @qdf_dev: pointer to QDF device structure
2521 * @paddr: physical address
2522 * @iova: DMA address
2523 * @size: memory region size
2524 *
2525 * Return: 0 on success else failure code
2526 */
2527 static inline
2528 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2529 qdf_dma_addr_t iova, qdf_size_t size)
2530 {
2531 return pld_audio_smmu_map(qdf_dev->dev, paddr, iova, size);
2532 }
2533
2534 /**
2535 * dp_audio_smmu_unmap()- Remove memory region mapping from Audio SMMU CB
2536 * @qdf_dev: pointer to QDF device structure
2537 * @iova: DMA address
2538 * @size: memory region size
2539 *
2540 * Return: None
2541 */
2542 static inline
2543 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2544 qdf_size_t size)
2545 {
2546 pld_audio_smmu_unmap(qdf_dev->dev, iova, size);
2547 }
2548 #else
2549 static inline
2550 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2551 qdf_dma_addr_t iova, qdf_size_t size)
2552 {
2553 return 0;
2554 }
2555
2556 static inline
2557 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2558 qdf_size_t size)
2559 {
2560 }
2561 #endif
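/*
 * Illustrative sketch (example only): a direct-link buffer is expected to
 * be mapped into the audio SMMU context before it is handed over and
 * unmapped with the same iova/size once the buffer is reclaimed. The
 * surrounding error handling is an assumption of the example.
 *
 *	if (dp_audio_smmu_map(soc->osdev, paddr, iova, size))
 *		return QDF_STATUS_E_FAILURE;	// mapping failed
 *
 *	// ... buffer in use by the direct-link peripheral ...
 *
 *	dp_audio_smmu_unmap(soc->osdev, iova, size);
 */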
2562
2563 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2564 static inline
2565 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2566 struct dp_srng *rxdma_srng,
2567 struct rx_desc_pool *rx_desc_pool,
2568 uint32_t num_req_buffers)
2569 {
2570 return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
2571 rxdma_srng,
2572 rx_desc_pool,
2573 num_req_buffers);
2574 }
2575
2576 static inline
2577 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2578 struct dp_srng *rxdma_srng,
2579 struct rx_desc_pool *rx_desc_pool,
2580 uint32_t num_req_buffers,
2581 union dp_rx_desc_list_elem_t **desc_list,
2582 union dp_rx_desc_list_elem_t **tail)
2583 {
2584 __dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2585 num_req_buffers, desc_list, tail);
2586 }
2587
2588 static inline
2589 void dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
2590 struct dp_srng *rxdma_srng,
2591 struct rx_desc_pool *rx_desc_pool,
2592 uint32_t num_req_buffers,
2593 union dp_rx_desc_list_elem_t **desc_list,
2594 union dp_rx_desc_list_elem_t **tail)
2595 {
2596 __dp_rx_comp2refill_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2597 num_req_buffers, desc_list, tail);
2598 }
2599
2600 static inline
2601 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2602 struct dp_srng *rxdma_srng,
2603 struct rx_desc_pool *rx_desc_pool,
2604 bool force_replenish)
2605 {
2606 __dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
2607 rx_desc_pool,
2608 force_replenish);
2609 }
2610
2611 #ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2612 static inline
2613 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2614 qdf_nbuf_t nbuf,
2615 uint32_t buf_size)
2616 {
2617 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2618 (void *)(nbuf->data + buf_size));
2619
2620 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2621 }
2622 #else
2623 #define L3_HEADER_PAD 2
2624 static inline
2625 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2626 qdf_nbuf_t nbuf,
2627 uint32_t buf_size)
2628 {
2629 if (nbuf->recycled_for_ds)
2630 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2631
2632 if (unlikely(!nbuf->fast_recycled)) {
2633 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2634 (void *)(nbuf->data + buf_size));
2635 }
2636
2637 DP_STATS_INC(dp_soc, rx.fast_recycled, 1);
2638 nbuf->fast_recycled = 0;
2639
2640 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2641 }
2642 #endif
2643
2644 static inline
2645 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2646 qdf_nbuf_t nbuf,
2647 uint32_t buf_size)
2648 {
2649 qdf_nbuf_dma_inv_range((void *)nbuf->data,
2650 (void *)(nbuf->data + buf_size));
2651
2652 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2653 }
2654
2655 #if !defined(SPECULATIVE_READ_DISABLED)
2656 static inline
2657 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2658 struct dp_rx_desc *rx_desc,
2659 uint8_t reo_ring_num)
2660 {
2661 struct rx_desc_pool *rx_desc_pool;
2662 qdf_nbuf_t nbuf;
2663
2664 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2665 nbuf = rx_desc->nbuf;
2666
2667 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2668 (void *)(nbuf->data + rx_desc_pool->buf_size));
2669 }
2670
2671 static inline
2672 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2673 struct rx_desc_pool *rx_desc_pool,
2674 qdf_nbuf_t nbuf)
2675 {
2676 qdf_nbuf_dma_inv_range((void *)nbuf->data,
2677 (void *)(nbuf->data + rx_desc_pool->buf_size));
2678 }
2679
2680 #else
2681 static inline
2682 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2683 struct dp_rx_desc *rx_desc,
2684 uint8_t reo_ring_num)
2685 {
2686 }
2687
2688 static inline
2689 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2690 struct rx_desc_pool *rx_desc_pool,
2691 qdf_nbuf_t nbuf)
2692 {
2693 }
2694 #endif
2695
2696 static inline
2697 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
2698 uint32_t bufs_reaped)
2699 {
2700 }
2701
2702 static inline
2703 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
2704 struct rx_desc_pool *rx_desc_pool)
2705 {
2706 return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
2707 RX_BUFFER_RESERVATION,
2708 rx_desc_pool->buf_alignment, FALSE);
2709 }
2710
2711 static inline
2712 void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
2713 {
2714 qdf_nbuf_free_simple(nbuf);
2715 }
2716 #else
2717 static inline
2718 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2719 struct dp_srng *rxdma_srng,
2720 struct rx_desc_pool *rx_desc_pool,
2721 uint32_t num_req_buffers)
2722 {
2723 return dp_pdev_rx_buffers_attach(soc, mac_id,
2724 rxdma_srng,
2725 rx_desc_pool,
2726 num_req_buffers);
2727 }
2728
2729 static inline
2730 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2731 struct dp_srng *rxdma_srng,
2732 struct rx_desc_pool *rx_desc_pool,
2733 uint32_t num_req_buffers,
2734 union dp_rx_desc_list_elem_t **desc_list,
2735 union dp_rx_desc_list_elem_t **tail)
2736 {
2737 dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2738 num_req_buffers, desc_list, tail, false);
2739 }
2740
2741 static inline
2742 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2743 struct dp_srng *rxdma_srng,
2744 struct rx_desc_pool *rx_desc_pool,
2745 bool force_replenish)
2746 {
2747 union dp_rx_desc_list_elem_t *desc_list = NULL;
2748 union dp_rx_desc_list_elem_t *tail = NULL;
2749
2750 __dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2751 0, &desc_list, &tail, false, force_replenish,
2752 __func__);
2753 }
2754
2755 static inline
2756 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2757 qdf_nbuf_t nbuf,
2758 uint32_t buf_size)
2759 {
2760 return (qdf_dma_addr_t)NULL;
2761 }
2762
2763 static inline
2764 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2765 qdf_nbuf_t nbuf,
2766 uint32_t buf_size)
2767 {
2768 return (qdf_dma_addr_t)NULL;
2769 }
2770
2771 static inline
2772 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2773 struct dp_rx_desc *rx_desc,
2774 uint8_t reo_ring_num)
2775 {
2776 struct rx_desc_pool *rx_desc_pool;
2777
2778 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2779 dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
2780
2781 dp_audio_smmu_unmap(soc->osdev,
2782 QDF_NBUF_CB_PADDR(rx_desc->nbuf),
2783 rx_desc_pool->buf_size);
2784
2785 dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
2786 rx_desc_pool->buf_size,
2787 false, __func__, __LINE__);
2788 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2789 QDF_DMA_FROM_DEVICE,
2790 rx_desc_pool->buf_size);
2791 rx_desc->unmapped = 1;
2792
2793 dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
2794 }
2795
2796 static inline
2797 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2798 struct rx_desc_pool *rx_desc_pool,
2799 qdf_nbuf_t nbuf)
2800 {
2801 dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
2802 rx_desc_pool->buf_size);
2803 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
2804 rx_desc_pool->buf_size,
2805 false, __func__, __LINE__);
2806 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
2807 rx_desc_pool->buf_size);
2808 }
2809
2810 static inline
2811 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
2812 uint32_t bufs_reaped)
2813 {
2814 int cpu_id = qdf_get_cpu();
2815
2816 DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
2817 }
2818
2819 static inline
2820 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
2821 struct rx_desc_pool *rx_desc_pool)
2822 {
2823 return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
2824 RX_BUFFER_RESERVATION,
2825 rx_desc_pool->buf_alignment, FALSE);
2826 }
2827
2828 static inline
2829 void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
2830 {
2831 qdf_nbuf_free(nbuf);
2832 }
2833 #endif
2834
2835 #ifdef DP_UMAC_HW_RESET_SUPPORT
2836 /**
2837 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
2838 * @soc: core txrx main context
2839 * @nbuf_list: nbuf list for delayed free
2840 *
2841 * Return: void
2842 */
2843 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
2844
2845 /**
2846 * dp_rx_desc_delayed_free() - Delayed free of the rx descs
2847 *
2848 * @soc: core txrx main context
2849 *
2850 * Return: void
2851 */
2852 void dp_rx_desc_delayed_free(struct dp_soc *soc);
2853 #endif
2854
2855 /**
2856 * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
2857 * @soc: core txrx main context
2858 * @nbuf : pointer to the first msdu of an amsdu.
2859 * @peer_id : Peer id of the peer
2860 * @txrx_ref_handle : Buffer to save the handle for txrx peer's reference
2861 * @pkt_capture_offload : Flag indicating if pkt capture offload is needed
2862 * @vdev : Buffer to hold pointer to vdev
2863 * @rx_pdev : Buffer to hold pointer to rx pdev
2864 * @dsf : delay stats flag
2865 * @old_tid : Old tid
2866 *
2867 * Get txrx peer and vdev from peer id
2868 *
2869 * Return: Pointer to txrx peer
2870 */
2871 static inline struct dp_txrx_peer *
2872 dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
2873 qdf_nbuf_t nbuf,
2874 uint16_t peer_id,
2875 dp_txrx_ref_handle *txrx_ref_handle,
2876 bool pkt_capture_offload,
2877 struct dp_vdev **vdev,
2878 struct dp_pdev **rx_pdev,
2879 uint32_t *dsf,
2880 uint32_t *old_tid)
2881 {
2882 struct dp_txrx_peer *txrx_peer = NULL;
2883
2884 txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
2885 DP_MOD_ID_RX);
2886
2887 if (qdf_likely(txrx_peer)) {
2888 *vdev = txrx_peer->vdev;
2889 } else {
2890 nbuf->next = NULL;
2891 dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
2892 pkt_capture_offload);
2893 if (!pkt_capture_offload)
2894 dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2895
2896 goto end;
2897 }
2898
2899 if (qdf_unlikely(!(*vdev))) {
2900 qdf_nbuf_free(nbuf);
2901 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2902 goto end;
2903 }
2904
2905 *rx_pdev = (*vdev)->pdev;
2906 *dsf = (*rx_pdev)->delay_stats_flag;
2907 *old_tid = 0xff;
2908
2909 end:
2910 return txrx_peer;
2911 }
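/*
 * Illustrative usage sketch (example only): the helper above either returns
 * a referenced txrx peer (with vdev/pdev/dsf/old_tid populated) or consumes
 * the nbuf itself. The local variables and the reference release via
 * dp_txrx_peer_unref_delete() with DP_MOD_ID_RX are assumptions of this
 * example.
 *
 *	txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
 *						 &txrx_ref_handle,
 *						 pkt_capture_offload,
 *						 &vdev, &rx_pdev, &dsf,
 *						 &old_tid);
 *	if (!txrx_peer)
 *		return;		// nbuf already delivered or freed
 *
 *	// ... per-msdu processing ...
 *
 *	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 */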
2912
2913 static inline QDF_STATUS
2914 dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
2915 uint32_t tid_bitmap, uint32_t ba_window_size)
2916 {
2917 return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
2918 peer, tid_bitmap,
2919 ba_window_size);
2920 }
2921
2922 static inline
2923 void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
2924 struct dp_vdev *vdev,
2925 struct dp_txrx_peer *txrx_peer,
2926 uint16_t peer_id,
2927 uint8_t pkt_capture_offload,
2928 qdf_nbuf_t deliver_list_head,
2929 qdf_nbuf_t deliver_list_tail)
2930 {
2931 qdf_nbuf_t nbuf, next;
2932
2933 if (qdf_likely(deliver_list_head)) {
2934 if (qdf_likely(txrx_peer)) {
2935 dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
2936 pkt_capture_offload,
2937 deliver_list_head);
2938 if (!pkt_capture_offload)
2939 dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
2940 deliver_list_head,
2941 deliver_list_tail);
2942 } else {
2943 nbuf = deliver_list_head;
2944 while (nbuf) {
2945 next = nbuf->next;
2946 nbuf->next = NULL;
2947 dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2948 nbuf = next;
2949 }
2950 }
2951 }
2952 }
2953
2954 #ifdef DP_TX_RX_TPUT_SIMULATE
2955 /*
2956 * Change this macro value to simulate a different RX throughput.
2957 * For example, if OTA is 100 Mbps and 200 Mbps is to be simulated, the
2958 * multiplication factor is 2, so set the macro value to 1 (multiplication factor - 1).
2959 */
2960 #define DP_RX_PKTS_DUPLICATE_CNT 0
2961 static inline
2962 void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
2963 struct dp_vdev *vdev,
2964 struct dp_txrx_peer *txrx_peer,
2965 uint16_t peer_id,
2966 uint8_t pkt_capture_offload,
2967 qdf_nbuf_t ori_list_head,
2968 qdf_nbuf_t ori_list_tail)
2969 {
2970 qdf_nbuf_t new_skb = NULL;
2971 qdf_nbuf_t new_list_head = NULL;
2972 qdf_nbuf_t new_list_tail = NULL;
2973 qdf_nbuf_t nbuf = NULL;
2974 int i;
2975
2976 for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
2977 nbuf = ori_list_head;
2978 new_list_head = NULL;
2979 new_list_tail = NULL;
2980
2981 while (nbuf) {
2982 new_skb = qdf_nbuf_copy(nbuf);
2983 if (qdf_likely(new_skb))
2984 DP_RX_LIST_APPEND(new_list_head,
2985 new_list_tail,
2986 new_skb);
2987 else
2988 dp_err("copy skb failed");
2989
2990 nbuf = qdf_nbuf_next(nbuf);
2991 }
2992
2993 /* deliver the copied nbuf list */
2994 dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
2995 pkt_capture_offload,
2996 new_list_head,
2997 new_list_tail);
2998 }
2999
3000 /* deliver the original skb_list */
3001 dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
3002 pkt_capture_offload,
3003 ori_list_head,
3004 ori_list_tail);
3005 }
3006
3007 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver
3008
3009 #else /* !DP_TX_RX_TPUT_SIMULATE */
3010
3011 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver
3012
3013 #endif /* DP_TX_RX_TPUT_SIMULATE */
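/*
 * Illustrative sketch (example only): regardless of whether throughput
 * simulation is compiled in, a hypothetical caller delivers the accumulated
 * nbuf list through the same macro at the end of a reap pass.
 *
 *	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
 *			       pkt_capture_offload,
 *			       deliver_list_head, deliver_list_tail);
 */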
3014
3015 /**
3016 * dp_rx_wbm_desc_nbuf_sanity_check() - Add sanity check to catch WBM rx_desc
3017 * paddr corruption
3018 * @soc: core txrx main context
3019 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
3020 * @ring_desc: REO ring descriptor
3021 * @rx_desc: Rx descriptor
3022 *
3023 * Return: NONE
3024 */
3025 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
3026 hal_ring_handle_t hal_ring_hdl,
3027 hal_ring_desc_t ring_desc,
3028 struct dp_rx_desc *rx_desc);
3029 /**
3030 * dp_rx_is_sg_formation_required() - Check if sg formation is required
3031 * @info: WBM desc info
3032 *
3033 * Return: True if sg is required else false
3034 */
3035 bool dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info);
3036
3037 /**
3038 * dp_rx_err_tlv_invalidate() - Invalidate network buffer
3039 * @soc: core txrx main context
3040 * @nbuf: Network buffer to invalidate
3041 *
3042 * Return: NONE
3043 */
3044 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
3045 qdf_nbuf_t nbuf);
3046
3047 /**
3048 * dp_rx_wbm_sg_list_last_msdu_war() - war for HW issue
3049 * @soc: DP SOC handle
3050 *
3051 * This is a WAR for a HW issue where the length is only valid in the last msdu
3052 *
3053 * Return: NONE
3054 */
3055 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc);
3056
3057 /**
3058 * dp_rx_check_pkt_len() - Check for pktlen validity
3059 * @soc: DP SOC context
3060 * @pkt_len: computed length of the pkt from caller in bytes
3061 *
3062 * Return: true if pktlen > RX_BUFFER_SIZE, else return false
3063 *
3064 */
3065 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len);
3066
3067 /**
3068 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
3069 * @soc: pointer to dp_soc struct
3070 * @pool_id: Pool id to find dp_pdev
3071 * @rx_tlv_hdr: TLV header of received packet
3072 * @nbuf: SKB
3073 *
3074 * In certain types of packets, if the peer_id is not correct, the
3075 * driver may not be able to find the peer. Try finding the peer by
3076 * addr_2 of the received MPDU. If the peer is found, then most likely
3077 * the sw_peer_id & ast_idx are corrupted.
3078 *
3079 * Return: True if you find the peer by addr_2 of received MPDU else false
3080 */
3081 bool dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
3082 uint8_t pool_id,
3083 uint8_t *rx_tlv_hdr,
3084 qdf_nbuf_t nbuf);
3085
3086 /**
3087 * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3ddr_mcast is enabled
3088 * If so, drop the multicast frame.
3089 * @vdev: datapath vdev
3090 * @rx_tlv_hdr: TLV header
3091 *
3092 * Return: true if packet is to be dropped,
3093 * false, if packet is not dropped.
3094 */
3095 bool dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr);
3096
3097 /**
3098 * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
3099 * @soc: DP soc
3100 * @vdev: DP vdev handle
3101 * @txrx_peer: pointer to the txrx_peer object
3102 * @nbuf: skb list head
3103 * @tail: skb list tail
3104 * @is_eapol: eapol pkt check
3105 *
3106 * Return: None
3107 */
3108 void
3109 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
3110 struct dp_vdev *vdev,
3111 struct dp_txrx_peer *txrx_peer,
3112 qdf_nbuf_t nbuf,
3113 qdf_nbuf_t tail,
3114 bool is_eapol);
3115
3116 /**
3117 * dp_rx_set_wbm_err_info_in_nbuf() - function to set wbm err info in nbuf
3118 * @soc: DP soc
3119 * @nbuf: skb list head
3120 * @wbm_err: wbm error info details
3121 *
3122 * Return: None
3123 */
3124 void
3125 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
3126 qdf_nbuf_t nbuf,
3127 union hal_wbm_err_info_u wbm_err);
3128
3129 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
3130 static inline uint8_t
3131 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
3132 {
3133 return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
3134 }
3135
3136 static inline uint8_t
3137 dp_rx_get_rx_bm_id(struct dp_soc *soc)
3138 {
3139 return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
3140 }
3141 #else
3142 static inline uint8_t
3143 dp_rx_get_rx_bm_id(struct dp_soc *soc)
3144 {
3145 struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
3146 uint8_t wbm2_sw_rx_rel_ring_id;
3147
3148 wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
3149
3150 return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
3151 wbm2_sw_rx_rel_ring_id);
3152 }
3153
3154 static inline uint8_t
3155 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
3156 {
3157 return dp_rx_get_rx_bm_id(soc);
3158 }
3159 #endif
3160
3161 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
3162 /**
3163 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
3164 *
3165 * @soc: core txrx main context
3166 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
3167 * @ring_desc: opaque pointer to the RX ring descriptor
3168 * @rx_desc: host rx descriptor
3169 *
3170 * Return: void
3171 */
3172 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
3173 hal_ring_handle_t hal_ring_hdl,
3174 hal_ring_desc_t ring_desc,
3175 struct dp_rx_desc *rx_desc);
3176
3177 /**
3178 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
3179 * (WBM), following error handling
3180 *
3181 * @soc: core DP main context
3182 * @ring_desc: opaque pointer to the REO error ring descriptor
3183 * @bm_action: put to idle_list or release to msdu_list
3184 *
3185 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
3186 */
3187 QDF_STATUS
3188 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
3189 uint8_t bm_action);
3190
3191 /**
3192 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
3193 * HW (WBM) by address
3194 *
3195 * @soc: core DP main context
3196 * @link_desc_addr: link descriptor addr
3197 * @bm_action: put to idle_list or release to msdu_list
3198 *
3199 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
3200 */
3201 QDF_STATUS
3202 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
3203 hal_buff_addrinfo_t link_desc_addr,
3204 uint8_t bm_action);
3205
3206 /**
3207 * dp_rxdma_err_process() - RxDMA error processing functionality
3208 * @int_ctx: pointer to DP interrupt context
3209 * @soc: core txrx main context
3210 * @mac_id: mac id which is one of 3 mac_ids
3211 * @quota: No. of units (packets) that can be serviced in one shot.
3212 *
3213 * Return: num of buffers processed
3214 */
3215 uint32_t
3216 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3217 uint32_t mac_id, uint32_t quota);
3218
3219 /**
3220 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
3221 * frames to OS or wifi parse errors.
3222 * @soc: core DP main context
3223 * @nbuf: buffer pointer
3224 * @rx_tlv_hdr: start of rx tlv header
3225 * @txrx_peer: peer reference
3226 * @err_code: rxdma err code
3227 * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
3228 * pool_id has same mapping)
3229 * @link_id: link Id on which the packet is received
3230 *
3231 * Return: None
3232 */
3233 void
3234 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
3235 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
3236 uint8_t err_code, uint8_t mac_id, uint8_t link_id);
3237
3238 /**
3239 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
3240 * @soc: core DP main context
3241 * @nbuf: buffer pointer
3242 * @rx_tlv_hdr: start of rx tlv header
3243 * @txrx_peer: txrx peer handle
3244 *
3245 * Return: void
3246 */
3247 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
3248 uint8_t *rx_tlv_hdr,
3249 struct dp_txrx_peer *txrx_peer);
3250
3251 /**
3252 * dp_2k_jump_handle() - Function to handle 2k jump exception
3253 * on WBM ring
3254 * @soc: core DP main context
3255 * @nbuf: buffer pointer
3256 * @rx_tlv_hdr: start of rx tlv header
3257 * @peer_id: peer id of first msdu
3258 * @tid: Tid for which exception occurred
3259 *
3260 * This function handles 2k jump violations arising out
3261 * of receiving aggregates in non BA case. This typically
3262 * may happen if aggregates are received on a QOS enabled TID
3263 * while Rx window size is still initialized to value of 2. Or
3264 * it may also happen if negotiated window size is 1 but peer
3265 * sends aggregates.
3266 */
3267 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
3268 uint16_t peer_id, uint8_t tid);
3269
3270 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3271
3272 /**
3273 * dp_rx_err_process() - Processes error frames routed to REO error ring
3274 * @int_ctx: pointer to DP interrupt context
3275 * @soc: core txrx main context
3276 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
3277 * @quota: No. of units (packets) that can be serviced in one shot.
3278 *
3279 * This function implements error processing and top level demultiplexer
3280 * for all the frames routed to REO error ring.
3281 *
3282 * Return: uint32_t: No. of elements processed
3283 */
3284 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3285 hal_ring_handle_t hal_ring_hdl, uint32_t quota);
3286
3287 /**
3288 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
3289 * @int_ctx: pointer to DP interrupt context
3290 * @soc: core txrx main context
3291 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
3292 * serviced
3293 * @quota: No. of units (packets) that can be serviced in one shot.
3294 *
3295 * This function implements error processing and top level demultiplexer
3296 * for all the frames routed to WBM2HOST sw release ring.
3297 *
3298 * Return: uint32_t: No. of elements processed
3299 */
3300 uint32_t
3301 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3302 hal_ring_handle_t hal_ring_hdl, uint32_t quota);
3303
3304 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS
3305 /**
3306 * dp_rx_srng_access_start()- Wrapper function to log access start of a hal ring
3307 * @int_ctx: pointer to DP interrupt context
3308 * @soc: DP soc structure pointer
3309 * @hal_ring_hdl: HAL ring handle
3310 *
3311 * Return: 0 on success; error on failure
3312 */
3313 static inline int
3314 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
3315 hal_ring_handle_t hal_ring_hdl)
3316 {
3317 return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
3318 }
3319
3320 /**
3321 * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
3322 * @int_ctx: pointer to DP interrupt context
3323 * @soc: DP soc structure pointer
3324 * @hal_ring_hdl: HAL ring handle
3325 *
3326 * Return: None
3327 */
3328 static inline void
3329 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
3330 hal_ring_handle_t hal_ring_hdl)
3331 {
3332 hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
3333 }
3334 #else
3335 static inline int
3336 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
3337 hal_ring_handle_t hal_ring_hdl)
3338 {
3339 return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
3340 }
3341
3342 static inline void
3343 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
3344 hal_ring_handle_t hal_ring_hdl)
3345 {
3346 dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
3347 }
3348 #endif
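/*
 * Example (illustrative sketch): regardless of which variant is compiled in,
 * callers bracket ring reaping with the wrappers above. The
 * hal_srng_dst_get_next() call is shown only as a typical reap step and is
 * an assumption for illustration.
 *
 *	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl)))
 *		return 0;	// ring access failed, bail out
 *
 *	while (quota-- &&
 *	       (ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl))) {
 *		// process ring_desc
 *	}
 *
 *	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */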
3349
3350 #ifdef RX_DESC_SANITY_WAR
3351 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
3352 hal_ring_handle_t hal_ring_hdl,
3353 hal_ring_desc_t ring_desc,
3354 struct dp_rx_desc *rx_desc);
3355 #else
3356 static inline
3357 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
3358 hal_ring_handle_t hal_ring_hdl,
3359 hal_ring_desc_t ring_desc,
3360 struct dp_rx_desc *rx_desc)
3361 {
3362 return QDF_STATUS_SUCCESS;
3363 }
3364 #endif
3365
3366 #ifdef RX_DESC_DEBUG_CHECK
3367 /**
3368 * dp_rx_desc_nbuf_sanity_check() - Sanity check to catch REO rx_desc paddr
3369 * corruption
3370 * @soc: DP SoC context
3371 * @ring_desc: REO ring descriptor
3372 * @rx_desc: Rx descriptor
3373 *
3374 * Return: QDF_STATUS_SUCCESS if the check passes, error status otherwise
3375 */
3376 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
3377 hal_ring_desc_t ring_desc,
3378 struct dp_rx_desc *rx_desc);
3379 #else
3380 static inline
3381 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
3382 hal_ring_desc_t ring_desc,
3383 struct dp_rx_desc *rx_desc)
3384 {
3385 return QDF_STATUS_SUCCESS;
3386 }
3387 #endif
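/*
 * Example (illustrative sketch): both sanity helpers degrade to stubs that
 * return QDF_STATUS_SUCCESS when their feature flags are disabled, so the
 * reap loop can gate descriptor processing unconditionally:
 *
 *	if (QDF_IS_STATUS_ERROR(dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
 *						  ring_desc, rx_desc)))
 *		continue;	// descriptor failed sanity, skip it
 *
 *	if (QDF_IS_STATUS_ERROR(dp_rx_desc_nbuf_sanity_check(soc, ring_desc,
 *							     rx_desc)))
 *		continue;	// nbuf paddr mismatch, skip this entry
 */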
3388 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3389
3390 /**
3391 * dp_rx_wbm_sg_list_reset() - Initialize sg list
3392 * @soc: DP SOC handle
3393 *
3394 * This API should be called at soc init and after every sg processing.
3395 */
3396 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
3397 {
3398 if (soc) {
3399 soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
3400 soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
3401 soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
3402 soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
3403 }
3404 }
3405
3406 /**
3407 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
3408 * @soc: DP SOC handle
3409 *
3410 * This API should be called in the down path to avoid any leak.
3411 */
3412 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
3413 {
3414 if (soc) {
3415 if (soc->wbm_sg_param.wbm_sg_nbuf_head)
3416 qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
3417
3418 dp_rx_wbm_sg_list_reset(soc);
3419 }
3420 }
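/*
 * Example (illustrative sketch): typical lifetime of the WBM SG list state
 * kept in soc->wbm_sg_param.
 *
 *	dp_rx_wbm_sg_list_reset(soc);	// at soc init and after each SG MSDU
 *	...
 *	dp_rx_wbm_sg_list_deinit(soc);	// in the teardown path; frees any
 *					// half-built chain and resets state
 */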
3421
3422 /**
3423 * dp_rx_link_desc_refill_duplicate_check() - check whether the link desc
3424 * to be refilled is a duplicate
3425 * @soc: DP SOC handle
3426 * @buf_info: the last link desc buf info
3427 * @ring_buf_info: current buf address pointer including link desc
3428 *
3429 * Return: none.
3430 */
3431 void dp_rx_link_desc_refill_duplicate_check(
3432 struct dp_soc *soc,
3433 struct hal_buf_info *buf_info,
3434 hal_buff_addrinfo_t ring_buf_info);
3435 /**
3436 * dp_rx_srng_get_num_pending() - get number of pending entries
3437 * @hal_soc: hal soc opaque pointer
3438 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
3439 * @num_entries: number of entries in the hal_ring.
3440 * @near_full: pointer to a boolean. This is set if ring is near full.
3441 *
3442 * The function returns the number of entries in a destination ring which are
3443 * yet to be reaped. The function also checks if the ring is near full.
3444 * If more than half of the ring needs to be reaped, the ring is considered
3445 * approaching full.
3446 * The function uses hal_srng_dst_num_valid_locked to get the number of valid
3447 * entries. It should not be called within a SRNG lock. HW pointer value is
3448 * synced into cached_hp.
3449 *
3450 * Return: Number of pending entries if any
3451 */
3452 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
3453 hal_ring_handle_t hal_ring_hdl,
3454 uint32_t num_entries,
3455 bool *near_full);
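/*
 * Example (illustrative sketch, called outside any SRNG lock): using the
 * pending count and near-full hint to decide whether to extend the reap
 * quota. The quota doubling is an assumption for illustration, not driver
 * policy; hal_soc, hal_ring_hdl, num_entries and quota are assumed to be
 * in scope.
 *
 *	bool near_full = false;
 *	uint32_t pending;
 *
 *	pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *					     num_entries, &near_full);
 *	if (near_full)
 *		quota *= 2;	// ring more than half full, reap harder
 *	quota = QDF_MIN(quota, pending);
 */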
3456
3457 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
3458 /**
3459 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
3460 * @soc: Datapath soc structure
3461 * @ring_num: REO ring number
3462 * @ring_desc: REO ring descriptor
3463 *
3464 * Return: None
3465 */
3466 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
3467 hal_ring_desc_t ring_desc);
3468 #else
3469 static inline void
3470 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
3471 hal_ring_desc_t ring_desc)
3472 {
3473 }
3474 #endif
3475
3476 #ifdef QCA_SUPPORT_WDS_EXTENDED
3477 /**
3478 * dp_rx_is_list_ready() - Make different lists for 4-address
3479 * and 3-address frames
3480 * @nbuf_head: skb list head
3481 * @vdev: vdev
3482 * @txrx_peer: txrx peer handle
3483 * @peer_id: peer id of new received frame
3484 * @vdev_id: vdev_id of new received frame
3485 *
3486 * Return: true if peer_ids are different.
3487 */
3488 static inline bool
3489 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
3490 struct dp_vdev *vdev,
3491 struct dp_txrx_peer *txrx_peer,
3492 uint16_t peer_id,
3493 uint8_t vdev_id)
3494 {
3495 if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
3496 return true;
3497
3498 return false;
3499 }
3500
3501 /**
3502 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
3503 * @soc: core txrx main context
3504 * @vdev: vdev
3505 * @txrx_peer: txrx peer
3506 * @nbuf_head: skb list head
3507 *
3508 * Return: true if packet is delivered to netdev per STA.
3509 */
3510 bool
3511 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
3512 struct dp_txrx_peer *txrx_peer,
3513 qdf_nbuf_t nbuf_head);
3514 #else
3515 static inline bool
3516 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
3517 struct dp_vdev *vdev,
3518 struct dp_txrx_peer *txrx_peer,
3519 uint16_t peer_id,
3520 uint8_t vdev_id)
3521 {
3522 if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
3523 return true;
3524
3525 return false;
3526 }
3527 #endif
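/*
 * Example (illustrative sketch): the per-MSDU loop can use
 * dp_rx_is_list_ready() to decide when the accumulated nbuf list must be
 * flushed before frames for a different peer (or, in the non-WDS-ext build,
 * a different vdev) are appended. dp_rx_example_flush() is a hypothetical
 * helper, not a driver API.
 *
 *	if (dp_rx_is_list_ready(nbuf_head, vdev, txrx_peer,
 *				peer_id, vdev_id)) {
 *		dp_rx_example_flush(soc, vdev, txrx_peer, nbuf_head);
 *		nbuf_head = NULL;
 *		nbuf_tail = NULL;
 *	}
 */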
3528
3529 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3530 /**
3531 * dp_rx_mark_first_packet_after_wow_wakeup() - mark first packet after wow wakeup
3532 * @pdev: pointer to dp_pdev structure
3533 * @rx_tlv: pointer to rx_pkt_tlvs structure
3534 * @nbuf: pointer to skb buffer
3535 *
3536 * Return: None
3537 */
3538 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
3539 uint8_t *rx_tlv,
3540 qdf_nbuf_t nbuf);
3541 #else
3542 static inline void
3543 dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
3544 uint8_t *rx_tlv,
3545 qdf_nbuf_t nbuf)
3546 {
3547 }
3548 #endif
3549
3550 #else
3551 static inline QDF_STATUS
3552 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
3553 hal_buff_addrinfo_t link_desc_addr,
3554 uint8_t bm_action)
3555 {
3556 return QDF_STATUS_SUCCESS;
3557 }
3558
3559 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
3560 {
3561 }
3562
3563 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
3564 {
3565 }
3566
3567 static inline uint32_t
3568 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3569 uint32_t mac_id, uint32_t quota)
3570 {
3571 return 0;
3572 }
3573 #endif /* WLAN_SOFTUMAC_SUPPORT */
3574
3575 #ifndef CONFIG_NBUF_AP_PLATFORM
3576 static inline uint8_t
3577 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
3578 struct dp_txrx_peer *txrx_peer)
3579 {
3580 return QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf);
3581 }
3582 #else
3583 static inline uint8_t
3584 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
3585 struct dp_txrx_peer *txrx_peer)
3586 {
3587 uint8_t link_id = 0;
3588
3589 link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1);
3590 if (link_id > DP_MAX_MLO_LINKS) {
3591 link_id = 0;
3592 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3593 rx.inval_link_id_pkt_cnt,
3594 1, link_id);
3595 }
3596
3597 return link_id;
3598 }
3599 #endif /* CONFIG_NBUF_AP_PLATFORM */
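/*
 * Note (illustrative): on CONFIG_NBUF_AP_PLATFORM builds the HW link id is
 * 0-based while index 0 of the per-link stats array is used as the invalid
 * bucket, so HW link 0 maps to index 1, HW link 1 to index 2, and anything
 * beyond DP_MAX_MLO_LINKS falls back to index 0 and bumps
 * rx.inval_link_id_pkt_cnt.
 *
 *	uint8_t idx = dp_rx_get_stats_arr_idx_from_link_id(nbuf, txrx_peer);
 *	// pass idx as the link_id argument of the DP_PEER_PER_PKT_STATS_*
 *	// macros when updating per-link peer statistics
 */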
3600
3601 #endif /* _DP_RX_H */
3602