/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_BE_RX_H_
#define _DP_BE_RX_H_

#include <dp_types.h>
#include "dp_be.h"
#include "dp_peer.h"
#include <dp_rx.h>
#include "hal_be_rx.h"
#include "hal_be_rx_tlv.h"

/**
 * struct dp_be_intrabss_params - BE intra-BSS forwarding parameters
 * @dest_soc: destination soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from the destination peer
 */
struct dp_be_intrabss_params {
        struct dp_soc *dest_soc;
        uint8_t tx_vdev_id;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_intrabss_fwd_be() - API for intra-BSS forwarding. Forwarding is
 *      not allowed for an EAPOL packet whose DA does not match the
 *      vdev MAC address.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intra-BSS forwarded
 * @link_id: link id on which the packet is received
 *
 * Return: true if the packet is forwarded, else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
                           struct dp_txrx_peer *ta_txrx_peer,
                           uint8_t *rx_tlv_hdr,
                           qdf_nbuf_t nbuf,
                           uint8_t link_id);
#endif

/**
 * dp_rx_intrabss_mcast_handler_be() - intra-BSS mcast handler
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intra-BSS forwarded
 * @tid_stats: tid_stats structure
 * @link_id: link id on which the packet is received
 *
 * Return: true if the packet is forwarded, else false
 */
bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
                                struct dp_txrx_peer *ta_txrx_peer,
                                qdf_nbuf_t nbuf_copy,
                                struct cdp_tid_rx_stats *tid_stats,
                                uint8_t link_id);

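/**
 * dp_rx_word_mask_subscribe_be() - Subscribe the Rx TLV word mask for BE
 *      targets (description inferred from the name and signature; the
 *      exact HTT message layout is defined by the HTT interface)
 * @soc: Handle to DP Soc structure
 * @msg_word: HTT message word to be updated with the word mask subscription
 * @rx_filter: Rx ring filter configuration
 *
 * Return: None
 */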
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
                                  uint32_t *msg_word,
                                  void *rx_filter);

/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *      Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the REO ring
 * @quota: number of units (packets) that can be serviced in one shot
 *
 * This function implements the core of Rx functionality. It is
 * expected to handle only non-error frames.
 *
 * Return: Number of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
                          hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
                          uint32_t quota);

/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
                                   struct rx_desc_pool *rx_desc_pool,
                                   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
                               struct rx_desc_pool *rx_desc_pool,
                               uint32_t pool_id);

/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get the corresponding Rx Desc
 *      address from a WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
                                               void *ring_desc,
                                               struct dp_rx_desc **r_rx_desc);

/**
 * dp_rx_desc_cookie_2_va_be() - Convert an Rx Desc cookie ID to its VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to look up the virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
                                             uint32_t cookie);

#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
        defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check whether the Rx desc VA was obtained
 *      correctly; if not, do SW cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
                       uint32_t rx_buf_cookie,
                       struct dp_rx_desc **r_rx_desc)
{
        if (qdf_unlikely(!(*r_rx_desc))) {
                *r_rx_desc = (struct dp_rx_desc *)
                                dp_cc_desc_find(soc,
                                                rx_buf_cookie);
        }
}
#else
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
                       uint32_t rx_buf_cookie,
                       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */

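/**
 * dp_rx_desc_ppeds_cookie_2_va() - Convert a PPE-DS Rx desc cookie to its VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to look up the virtual address
 *
 * Return: Rx descriptor virtual address
 */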
struct dp_rx_desc *dp_rx_desc_ppeds_cookie_2_va(struct dp_soc *soc,
                                                unsigned long cookie);

#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)         (0)

#define HTT_RX_PEER_META_DATA_FIELD_GET(_var, _field_s, _field_m) \
        (((_var) & (_field_m)) >> (_field_s))

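/*
 * For illustration only (the actual shift/mask values are target-specific
 * and supplied via the soc; the numbers here are hypothetical): with
 * _field_s = 0 and _field_m = 0x3fff,
 * HTT_RX_PEER_META_DATA_FIELD_GET(meta, 0, 0x3fff) extracts the low
 * 14 bits of meta, which matches the fixed-layout peer id extraction
 * used below when DP_USE_REDUCED_PEER_ID_FIELD_WIDTH is not defined.
 */

/**
 * dp_rx_peer_metadata_peer_id_get_be() - Get the peer id from Rx peer
 *      metadata
 * @soc: Handle to DP Soc structure
 * @peer_metadata: peer metadata read from the Rx ring descriptor
 *
 * With DP_USE_REDUCED_PEER_ID_FIELD_WIDTH, the ML-peer-valid bit is folded
 * into the returned peer id; otherwise the combined field is extracted
 * with a fixed mask and shift.
 *
 * Return: peer id
 */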
#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
        uint8_t ml_peer_valid;
        uint16_t peer_id;

        peer_id = HTT_RX_PEER_META_DATA_FIELD_GET(peer_metadata,
                                                  soc->htt_peer_id_s,
                                                  soc->htt_peer_id_m);
        ml_peer_valid = HTT_RX_PEER_META_DATA_FIELD_GET(
                                peer_metadata,
                                soc->htt_mld_peer_valid_s,
                                soc->htt_mld_peer_valid_m);

        return (peer_id | (ml_peer_valid << soc->peer_id_shift));
}
#else
/* Combined ml_peer_valid and peer_id field */
#define DP_BE_PEER_METADATA_PEER_ID_MASK        0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT       0

static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
        return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
                DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif

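/**
 * dp_rx_peer_metadata_vdev_id_get_be() - Get the vdev id from Rx peer
 *      metadata
 * @soc: Handle to DP Soc structure
 * @peer_metadata: peer metadata read from the Rx ring descriptor
 *
 * Return: vdev id
 */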
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
        return HTT_RX_PEER_META_DATA_FIELD_GET(peer_metadata,
                                               soc->htt_vdev_id_s,
                                               soc->htt_vdev_id_m);
}

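/**
 * dp_rx_peer_metadata_lmac_id_get_be() - Get the lmac id from Rx peer
 *      metadata
 * @peer_metadata: peer metadata read from the Rx ring descriptor
 *
 * Return: lmac id
 */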
static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
        return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
                          hal_ring_handle_t hal_ring_hdl,
                          uint8_t reo_ring_num,
                          uint32_t quota);
#else
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
                          hal_ring_handle_t hal_ring_hdl,
                          uint8_t reo_ring_num,
                          uint32_t quota)
{
        return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
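/**
 * dp_rx_replenish_soc_get() - Get the soc from which Rx buffers should be
 *      replenished for the given chip id
 * @soc: DP soc handle
 * @chip_id: chip id of the MLO partner soc
 *
 * Return: dp_soc handle to replenish from
 */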
struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id);

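/**
 * dp_soc_get_by_idle_bm_id() - Get the soc that owns the given idle-link
 *      buffer manager id
 * @soc: DP soc handle
 * @idle_bm_id: idle link descriptor buffer manager id
 *
 * Return: dp_soc handle matching @idle_bm_id
 */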
struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id);

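/**
 * dp_soc_get_num_soc_be() - Get the number of socs in the MLO group
 * @soc: DP soc handle
 *
 * Return: number of socs
 */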
uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc);
#else
static inline struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
        return soc;
}

static inline uint8_t
dp_soc_get_num_soc_be(struct dp_soc *soc)
{
        return 1;
}
#endif

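/**
 * dp_peer_rx_reorder_q_setup_per_tid() - Send a reorder queue setup
 *      command to the FW for each TID in the bitmap, one TID at a time
 * @peer: dp peer to operate on
 * @tid_bitmap: TIDs to be set up
 * @ba_window_size: BlockAck window size
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */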
static inline QDF_STATUS
dp_peer_rx_reorder_q_setup_per_tid(struct dp_peer *peer,
                                   uint32_t tid_bitmap,
                                   uint32_t ba_window_size)
{
        int tid;
        struct dp_rx_tid *rx_tid;
        struct dp_soc *soc = peer->vdev->pdev->soc;

        if (!soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
                dp_peer_debug("%pK: rx_reorder_queue_setup NULL bitmap 0x%x",
                              soc, tid_bitmap);
                return QDF_STATUS_SUCCESS;
        }

        for (tid = 0; tid < DP_MAX_TIDS; tid++) {
                if (!(BIT(tid) & tid_bitmap))
                        continue;

                rx_tid = &peer->rx_tid[tid];
                if (!rx_tid->hw_qdesc_paddr) {
                        tid_bitmap &= ~BIT(tid);
                        continue;
                }

                if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
                                soc->ctrl_psoc,
                                peer->vdev->pdev->pdev_id,
                                peer->vdev->vdev_id,
                                peer->mac_addr.raw,
                                rx_tid->hw_qdesc_paddr,
                                tid, tid,
                                1, ba_window_size)) {
                        dp_peer_err("%pK: Fail to send reo q to FW. tid %d",
                                    soc, tid);
                        return QDF_STATUS_E_FAILURE;
                }
        }

        if (!tid_bitmap) {
                dp_peer_err("tid_bitmap=0. All tids setup fail");
                return QDF_STATUS_E_FAILURE;
        }
        return QDF_STATUS_SUCCESS;
}

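/**
 * dp_peer_multi_tid_params_setup() - Fill the multi-TID reorder queue
 *      setup parameters from the peer's per-TID queue descriptors
 * @peer: dp peer to operate on
 * @tid_bitmap: TIDs to be set up
 * @ba_window_size: BlockAck window size
 * @tid_params: setup parameter structure to be filled
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */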
static inline QDF_STATUS
dp_peer_multi_tid_params_setup(struct dp_peer *peer,
                               uint32_t tid_bitmap,
                               uint32_t ba_window_size,
                               struct multi_rx_reorder_queue_setup_params *tid_params)
{
        struct dp_rx_tid *rx_tid;
        int tid;

        tid_params->peer_macaddr = peer->mac_addr.raw;
        tid_params->tid_bitmap = tid_bitmap;
        tid_params->vdev_id = peer->vdev->vdev_id;

        for (tid = 0; tid < DP_MAX_TIDS; tid++) {
                if (!(BIT(tid) & tid_bitmap))
                        continue;

                rx_tid = &peer->rx_tid[tid];
                if (!rx_tid->hw_qdesc_paddr) {
                        tid_params->tid_bitmap &= ~BIT(tid);
                        continue;
                }

                tid_params->tid_num++;
                tid_params->queue_params_list[tid].hw_qdesc_paddr =
                        rx_tid->hw_qdesc_paddr;
                tid_params->queue_params_list[tid].queue_no = tid;
                tid_params->queue_params_list[tid].ba_window_size_valid = 1;
                tid_params->queue_params_list[tid].ba_window_size =
                        ba_window_size;
        }

        if (!tid_params->tid_bitmap) {
                dp_peer_err("tid_bitmap=0. All tids setup fail");
                return QDF_STATUS_E_FAILURE;
        }

        return QDF_STATUS_SUCCESS;
}

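/**
 * dp_peer_rx_reorder_multi_q_setup() - Send a single reorder queue setup
 *      command to the FW covering all TIDs in the bitmap
 * @peer: dp peer to operate on
 * @tid_bitmap: TIDs to be set up
 * @ba_window_size: BlockAck window size
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */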
static inline QDF_STATUS
dp_peer_rx_reorder_multi_q_setup(struct dp_peer *peer,
                                 uint32_t tid_bitmap,
                                 uint32_t ba_window_size)
{
        QDF_STATUS status;
        struct dp_soc *soc = peer->vdev->pdev->soc;
        struct multi_rx_reorder_queue_setup_params tid_params = {0};

        if (!soc->cdp_soc.ol_ops->peer_multi_rx_reorder_queue_setup) {
                dp_peer_debug("%pK: callback NULL", soc);
                return QDF_STATUS_SUCCESS;
        }

        status = dp_peer_multi_tid_params_setup(peer, tid_bitmap,
                                                ba_window_size,
                                                &tid_params);
        if (qdf_unlikely(QDF_IS_STATUS_ERROR(status)))
                return status;

        if (soc->cdp_soc.ol_ops->peer_multi_rx_reorder_queue_setup(
                        soc->ctrl_psoc,
                        peer->vdev->pdev->pdev_id,
                        &tid_params)) {
                dp_peer_err("%pK: multi_reorder_q_setup fail. tid_bitmap 0x%x",
                            soc, tid_bitmap);
                return QDF_STATUS_E_FAILURE;
        }

        return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for MLO IGMP/mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 * @nbuf: nbuf to be enqueued
 * @link_id: link id on which the packet is received
 *
 * Return: true when the packet is sent to the stack, false on failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
                            struct dp_vdev *vdev,
                            struct dp_txrx_peer *peer,
                            qdf_nbuf_t nbuf,
                            uint8_t link_id);

/**
 * dp_peer_rx_reorder_queue_setup_be() - Send the reorder queue setup
 *      WMI cmd to the FW, per peer type
 * @soc: DP Soc handle
 * @peer: dp peer to operate on
 * @tid_bitmap: TIDs to be set up
 * @ba_window_size: BlockAck window size
 *
 * Return: 0 - success, others - failure
 */
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
                                             struct dp_peer *peer,
                                             uint32_t tid_bitmap,
                                             uint32_t ba_window_size)
{
        uint8_t i;
        struct dp_mld_link_peers link_peers_info;
        struct dp_peer *link_peer;
        struct dp_rx_tid *rx_tid;
        int tid;
        QDF_STATUS status;

        if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
                /* Some BE targets don't require WMI and instead use a
                 * host-managed shared table for storing the REO queue
                 * ref structs.
                 */
                if (IS_MLO_DP_LINK_PEER(peer) ||
                    peer->peer_id == HTT_INVALID_PEER) {
                        /* Return if this is for an MLD link peer; the table
                         * is not used in the MLD link peer case, as the MLD
                         * peer's qref is written to the LUT in peer setup or
                         * peer map. At this point peer setup for the link
                         * peer is called before peer map, hence the peer id
                         * is not yet assigned. This can happen if peer_setup
                         * is called before the host receives the HTT peer
                         * map. In that case return success with no op and
                         * let peer map handle writing the reo_qref to the
                         * LUT.
                         */
                        dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
                        return QDF_STATUS_SUCCESS;
                }

                for (tid = 0; tid < DP_MAX_TIDS; tid++) {
                        if (!(BIT(tid) & tid_bitmap))
                                continue;

                        rx_tid = &peer->rx_tid[tid];
                        if (!rx_tid->hw_qdesc_paddr) {
                                tid_bitmap &= ~BIT(tid);
                                continue;
                        }

                        hal_reo_shared_qaddr_write(soc->hal_soc,
                                                   peer->peer_id,
                                                   tid,
                                                   rx_tid->hw_qdesc_paddr);
                }

                /* The bitmap can only become empty once all TIDs have
                 * been examined, so check it after the loop.
                 */
                if (!tid_bitmap) {
                        dp_peer_err("tid_bitmap=0. All tids setup fail");
                        return QDF_STATUS_E_FAILURE;
                }

                return QDF_STATUS_SUCCESS;
        }

        /* hal_reo_shared_qaddr_is_enable() is false: setup goes via WMI */
        if (IS_MLO_DP_MLD_PEER(peer)) {
                /* get link peers with reference */
                dp_get_link_peers_ref_from_mld_peer(soc, peer,
                                                    &link_peers_info,
                                                    DP_MOD_ID_CDP);
                /* send the WMI cmd to each link peer */
                for (i = 0; i < link_peers_info.num_links; i++) {
                        link_peer = link_peers_info.link_peers[i];
                        if (soc->features.multi_rx_reorder_q_setup_support)
                                status = dp_peer_rx_reorder_multi_q_setup(
                                        link_peer, tid_bitmap, ba_window_size);
                        else
                                status = dp_peer_rx_reorder_q_setup_per_tid(
                                                link_peer,
                                                tid_bitmap,
                                                ba_window_size);
                        if (QDF_IS_STATUS_ERROR(status)) {
                                dp_release_link_peers_ref(&link_peers_info,
                                                          DP_MOD_ID_CDP);
                                return status;
                        }
                }
                /* release link peers reference */
                dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
        } else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
                if (soc->features.multi_rx_reorder_q_setup_support)
                        return dp_peer_rx_reorder_multi_q_setup(peer,
                                                                tid_bitmap,
                                                                ba_window_size);
                else
                        return dp_peer_rx_reorder_q_setup_per_tid(peer,
                                                                  tid_bitmap,
                                                                  ba_window_size);
        } else {
                dp_peer_err("invalid peer type %d", peer->peer_type);

                return QDF_STATUS_E_FAILURE;
        }

        return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
                                             struct dp_peer *peer,
                                             uint32_t tid_bitmap,
                                             uint32_t ba_window_size)
{
        if (soc->features.multi_rx_reorder_q_setup_support)
                return dp_peer_rx_reorder_multi_q_setup(peer,
                                                        tid_bitmap,
                                                        ba_window_size);
        else
                return dp_peer_rx_reorder_q_setup_per_tid(peer,
                                                          tid_bitmap,
                                                          ba_window_size);
}
#endif /* WLAN_FEATURE_11BE_MLO */

#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
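/**
 * dp_rx_prefetch_nbuf_data_be() - Prefetch the next nbuf and its data
 * @nbuf: nbuf being processed currently
 * @next: next nbuf in the list, whose cb and data are prefetched
 *
 * Return: None
 */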
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
        if (next) {
                /* prefetch skb->next and the first few bytes of skb->cb */
                qdf_prefetch(next);
                /* skb->cb is spread across 2 cache lines, hence the
                 * additional prefetches below
                 */
                qdf_prefetch(&next->_skb_refdst);
                qdf_prefetch(&next->protocol);
                qdf_prefetch(&next->data);
                qdf_prefetch(next->data);
                qdf_prefetch(next->data + 64);
        }
}
#else
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif

#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_va_prefetch() - function to prefetch the SW desc
 * @last_prefetched_hw_desc: HW desc
 *
 * Return: prefetched Rx descriptor virtual address
 */
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
        void *prefetch_desc;

        prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
        qdf_prefetch(prefetch_desc);
        return prefetch_desc;
}

579 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
580 * descriptors
581 * @soc: DP soc context
582 * @hal_soc: Handle to HAL Soc structure
583 * @num_entries: valid number of HW descriptors
584 * @hal_ring_hdl: Destination ring pointer
585 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
586 * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
587 *
588 * Return: None
589 */
590 static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc * soc,hal_soc_handle_t hal_soc,uint32_t num_entries,hal_ring_handle_t hal_ring_hdl,hal_ring_desc_t * last_prefetched_hw_desc,struct dp_rx_desc ** last_prefetched_sw_desc)591 dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
592 hal_soc_handle_t hal_soc,
593 uint32_t num_entries,
594 hal_ring_handle_t hal_ring_hdl,
595 hal_ring_desc_t *last_prefetched_hw_desc,
596 struct dp_rx_desc **last_prefetched_sw_desc)
597 {
598 if (*last_prefetched_sw_desc) {
599 qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
600 qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
601 }
602
603 if (num_entries) {
604 *last_prefetched_sw_desc =
605 dp_rx_va_prefetch(*last_prefetched_hw_desc);
606
607 if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
608 *last_prefetched_hw_desc =
609 hal_srng_dst_prefetch_next_cached_desc(hal_soc,
610 hal_ring_hdl,
611 (uint8_t *)*last_prefetched_hw_desc);
612 else
613 *last_prefetched_hw_desc =
614 hal_srng_dst_get_next_32_byte_desc(hal_soc,
615 hal_ring_hdl,
616 (uint8_t *)*last_prefetched_hw_desc);
617 }
618 }
619 #else
620 static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc * soc,hal_soc_handle_t hal_soc,uint32_t num_entries,hal_ring_handle_t hal_ring_hdl,hal_ring_desc_t * last_prefetched_hw_desc,struct dp_rx_desc ** last_prefetched_sw_desc)621 dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
622 hal_soc_handle_t hal_soc,
623 uint32_t num_entries,
624 hal_ring_handle_t hal_ring_hdl,
625 hal_ring_desc_t *last_prefetched_hw_desc,
626 struct dp_rx_desc **last_prefetched_sw_desc)
627 {
628 }
629 #endif
#ifdef CONFIG_WORD_BASED_TLV
/**
 * dp_rx_get_reo_qdesc_addr_be() - API to get the qdesc address of a REO
 *      entrance ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: REO dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of the RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer
 * @tid: tid value
 *
 * Return: qdesc address in the REO destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
                                     uint8_t *dst_ring_desc,
                                     uint8_t *buf,
                                     struct dp_txrx_peer *txrx_peer,
                                     unsigned int tid)
{
        struct dp_peer *peer = NULL;
        uint64_t qdesc_addr = 0;

        if (hal_reo_shared_qaddr_is_enable(hal_soc)) {
                qdesc_addr = (uint64_t)txrx_peer->peer_id;
        } else {
                peer = dp_peer_get_ref_by_id(txrx_peer->vdev->pdev->soc,
                                             txrx_peer->peer_id,
                                             DP_MOD_ID_CONFIG);
                if (!peer)
                        return 0;

                qdesc_addr = (uint64_t)peer->rx_tid[tid].hw_qdesc_paddr;
                dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
        }
        return qdesc_addr;
}
#else
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
                                     uint8_t *dst_ring_desc,
                                     uint8_t *buf,
                                     struct dp_txrx_peer *txrx_peer,
                                     unsigned int tid)
{
        return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
#endif

/**
 * dp_rx_wbm_err_reap_desc_be() - Function to reap and replenish
 *      WBM RX Error descriptors
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
 * @quota: number of units (packets) that can be serviced in one shot
 * @rx_bufs_used: number of descriptors reaped
 *
 * This function implements the core Rx functionality of reaping and
 * replenishing the RX error ring descriptors and creating an nbuf list
 * out of them. It also reads the WBM error information from the
 * descriptors and updates the nbuf TLV area.
 *
 * Return: qdf_nbuf_t: head pointer of the nbuf list created
 */
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
                           hal_ring_handle_t hal_ring_hdl, uint32_t quota,
                           uint32_t *rx_bufs_used);

/**
 * dp_rx_null_q_desc_handle_be() - Function to handle NULL Queue
 *      descriptor violations on either a REO or WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @txrx_peer: txrx peer handle
 * @is_reo_exception: flag to check whether the error is from REO or WBM
 * @link_id: link id on which the packet is received
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * happens if a packet is received on a QoS-enabled TID before the ADDBA
 * negotiation for that TID, i.e. before the TID queue is set up. It may
 * also happen for MC/BC frames if they are not routed to the non-QoS TID
 * queue, in the absence of any other default TID queue. This error can
 * show up in both a REO destination ring and a WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf is handled successfully; a QDF
 *      error code if the nbuf could not be handled or was dropped.
 */
QDF_STATUS
dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
                            uint8_t *rx_tlv_hdr, uint8_t pool_id,
                            struct dp_txrx_peer *txrx_peer,
                            bool is_reo_exception, uint8_t link_id);

#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
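/**
 * dp_rx_set_msdu_lmac_id() - Save the lmac id from Rx peer metadata
 *      in the nbuf cb
 * @nbuf: Rx nbuf
 * @peer_mdata: peer metadata read from the Rx ring descriptor
 *
 * Return: None
 */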
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
        uint8_t lmac_id;

        lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
        qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif

#ifndef CONFIG_NBUF_AP_PLATFORM
#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
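/**
 * dp_rx_peer_mdata_link_id_get_be() - Get the logical link id from Rx
 *      peer metadata
 * @peer_mdata: peer metadata read from the Rx ring descriptor
 *
 * Return: 1-based logical link id; 0 when out of range or when MLO link
 *      stats support is not enabled
 */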
static inline uint8_t
dp_rx_peer_mdata_link_id_get_be(uint32_t peer_mdata)
{
        uint8_t link_id;

        link_id = HTT_RX_PEER_META_DATA_V1A_LOGICAL_LINK_ID_GET(peer_mdata) + 1;
        if (link_id > DP_MAX_MLO_LINKS)
                link_id = 0;

        return link_id;
}
#else
static inline uint8_t
dp_rx_peer_mdata_link_id_get_be(uint32_t peer_metadata)
{
        return 0;
}
#endif /* DP_MLO_LINK_STATS_SUPPORT */

static inline void
dp_rx_set_mpdu_seq_number_be(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
        QDF_NBUF_CB_RX_MPDU_SEQ_NUM(nbuf) =
                hal_rx_mpdu_sequence_number_get_be(rx_tlv_hdr);
}

static inline void
dp_rx_set_link_id_be(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
        uint8_t logical_link_id;

        logical_link_id = dp_rx_peer_mdata_link_id_get_be(peer_mdata);
        QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf) = logical_link_id;
}

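/**
 * dp_rx_get_peer_id_be() - Get the peer id recorded in the nbuf cb
 * @nbuf: Rx nbuf
 *
 * Return: peer id
 */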
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
        return QDF_NBUF_CB_RX_PEER_ID(nbuf);
}

static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
                                      uint32_t mpdu_desc_info,
                                      uint32_t peer_mdata,
                                      uint32_t msdu_desc_info)
{
}

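/**
 * dp_rx_copy_desc_info_in_nbuf_cb() - Copy the REO ring descriptor info
 *      into the nbuf cb
 * @soc: Handle to DP Soc structure
 * @ring_desc: REO destination ring descriptor
 * @nbuf: nbuf on which the descriptor information is saved
 * @reo_ring_num: REO ring number
 *
 * Return: packet capture offload indication for the MSDU
 */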
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
                                                      hal_ring_desc_t ring_desc,
                                                      qdf_nbuf_t nbuf,
                                                      uint8_t reo_ring_num)
{
        struct hal_rx_mpdu_desc_info mpdu_desc_info;
        struct hal_rx_msdu_desc_info msdu_desc_info;
        uint8_t pkt_capture_offload = 0;
        uint32_t peer_mdata = 0;

        qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
        qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));

        /* Get MPDU DESC info */
        hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

        /* Get MSDU DESC info */
        hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

        /* Set the end bit to identify the last buffer in the MPDU */
        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
                qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

        if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
                qdf_nbuf_set_rx_retry_flag(nbuf, 1);

        if (qdf_unlikely(mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RAW_AMPDU))
                qdf_nbuf_set_raw_frame(nbuf, 1);

        peer_mdata = mpdu_desc_info.peer_meta_data;
        QDF_NBUF_CB_RX_PEER_ID(nbuf) =
                dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
        QDF_NBUF_CB_RX_VDEV_ID(nbuf) =
                dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
        dp_rx_set_msdu_lmac_id(nbuf, peer_mdata);
        dp_rx_set_link_id_be(nbuf, peer_mdata);

        /* indicates whether this msdu is rx offload */
        pkt_capture_offload =
                DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

        /*
         * Save the msdu first, last and continuation flags in nbuf->cb,
         * and also save mcbc, is_da_valid, is_sa_valid and the length
         * there. This keeps the info required for per-pkt processing in
         * the same cache line, which helps improve throughput for
         * smaller pkt sizes.
         */
        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
                qdf_nbuf_set_rx_chfrag_start(nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
                qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
                qdf_nbuf_set_da_mcbc(nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
                qdf_nbuf_set_da_valid(nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
                qdf_nbuf_set_sa_valid(nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
                qdf_nbuf_set_intra_bss(nbuf, 1);

        if (qdf_likely(mpdu_desc_info.mpdu_flags &
                       HAL_MPDU_F_QOS_CONTROL_VALID))
                qdf_nbuf_set_tid_val(nbuf, mpdu_desc_info.tid);

        /* set sw exception */
        qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
                        nbuf,
                        hal_rx_sw_exception_get_be(ring_desc));

        QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_desc_info.msdu_len;

        QDF_NBUF_CB_RX_CTX_ID(nbuf) = reo_ring_num;

        return pkt_capture_offload;
}

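/**
 * hal_rx_get_l3_pad_bytes_be() - Get the L3 header padding from the Rx TLVs
 * @nbuf: Rx nbuf
 * @rx_tlv_hdr: start of the Rx TLV headers
 *
 * Return: number of L3 header padding bytes
 */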
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
                                                 uint8_t *rx_tlv_hdr)
{
        return HAL_RX_TLV_L3_HEADER_PADDING_GET(rx_tlv_hdr);
}

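/**
 * dp_rx_wbm_err_msdu_continuation_get() - Check the MSDU continuation
 *      flag for a WBM error ring descriptor
 * @soc: Handle to DP Soc structure
 * @ring_desc: WBM release ring descriptor
 * @nbuf: Rx nbuf
 *
 * Return: non-zero if the MSDU continues in the next buffer, else 0
 */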
static inline uint8_t
dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
                                    hal_ring_desc_t ring_desc,
                                    qdf_nbuf_t nbuf)
{
        return hal_rx_wbm_err_msdu_continuation_get(soc->hal_soc,
                                                    ring_desc);
}
#else
static inline void
dp_rx_set_link_id_be(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}

static inline void
dp_rx_set_mpdu_seq_number_be(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
}

static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
        uint32_t peer_metadata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);

        return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
                DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}

static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
                                      uint32_t mpdu_desc_info,
                                      uint32_t peer_mdata,
                                      uint32_t msdu_desc_info)
{
        QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(nbuf) = mpdu_desc_info;
        QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf) = peer_mdata;
        QDF_NBUF_CB_RX_MSDU_DESC_INFO(nbuf) = msdu_desc_info;
}

static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
                                                      hal_ring_desc_t ring_desc,
                                                      qdf_nbuf_t nbuf,
                                                      uint8_t reo_ring_num)
{
        uint32_t mpdu_desc_info = 0;
        uint32_t msdu_desc_info = 0;
        uint32_t peer_mdata = 0;

        /* get the REO mpdu & msdu desc info */
        hal_rx_get_mpdu_msdu_desc_info_be(ring_desc,
                                          &mpdu_desc_info,
                                          &peer_mdata,
                                          &msdu_desc_info);

        dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
                                              mpdu_desc_info,
                                              peer_mdata,
                                              msdu_desc_info);

        return 0;
}

static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
                                                 uint8_t *rx_tlv_hdr)
{
        return QDF_NBUF_CB_RX_L3_PAD_MSB(nbuf) ? 2 : 0;
}

static inline uint8_t
dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
                                    hal_ring_desc_t ring_desc,
                                    qdf_nbuf_t nbuf)
{
        return qdf_nbuf_is_rx_chfrag_cont(nbuf);
}
#endif /* CONFIG_NBUF_AP_PLATFORM */

/**
 * dp_rx_wbm_err_copy_desc_info_in_nbuf() - API to copy WBM dest ring
 *      descriptor information into the nbuf CB/TLV
 * @soc: pointer to Soc structure
 * @ring_desc: WBM dest ring descriptor
 * @nbuf: nbuf in which to save the descriptor information
 * @pool_id: pool id part of the WBM error info
 *
 * Return: WBM error information details
 */
static inline uint32_t
dp_rx_wbm_err_copy_desc_info_in_nbuf(struct dp_soc *soc,
                                     hal_ring_desc_t ring_desc,
                                     qdf_nbuf_t nbuf,
                                     uint8_t pool_id)
{
        uint32_t mpdu_desc_info = 0;
        uint32_t msdu_desc_info = 0;
        uint32_t peer_mdata = 0;
        union hal_wbm_err_info_u wbm_err = { 0 };

        /* get the WBM mpdu & msdu desc info */
        hal_rx_wbm_err_mpdu_msdu_info_get_be(ring_desc,
                                             &wbm_err.info,
                                             &mpdu_desc_info,
                                             &msdu_desc_info,
                                             &peer_mdata);

        wbm_err.info_bit.pool_id = pool_id;
        dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
                                              mpdu_desc_info,
                                              peer_mdata,
                                              msdu_desc_info);
        dp_rx_set_wbm_err_info_in_nbuf(soc, nbuf, wbm_err);
        return wbm_err.info;
}

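/**
 * dp_get_soc_by_chip_id_be() - Get the dp_soc handle for an MLO chip id
 * @soc: DP soc handle used to look up the partner soc
 * @chip_id: MLO chip id
 *
 * Return: dp_soc of the given chip id; the passed-in @soc when MLO
 *      multi-chip is not enabled
 */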
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id);
#else
static inline struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
        return soc;
}
#endif
#endif /* _DP_BE_RX_H_ */