1 /*
2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include "hal_api.h"
27 #include "qdf_trace.h"
28 #include "qdf_nbuf.h"
29 #include "dp_rx_defrag.h"
30 #include "dp_ipa.h"
31 #include "dp_internal.h"
32 #ifdef WIFI_MONITOR_SUPPORT
33 #include "dp_htt.h"
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <enet.h> /* LLC_SNAP_HDR_LEN */
40 #include "qdf_net_types.h"
41 #include "dp_rx_buffer_pool.h"
42
43 #define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
44 #define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
45 #define dp_rx_err_info(params...) \
46 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
47 #define dp_rx_err_info_rl(params...) \
48 __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
49 #define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
50
51 #ifndef QCA_HOST_MODE_WIFI_DISABLED
52
53
54 /* Max regular Rx packet routing error */
55 #define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
56 #define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
57 #define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* micro seconds */
58
59 #ifdef FEATURE_MEC
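/**
 * dp_rx_mcast_echo_check() - check if the received mcast/bcast frame is an
 *			      echo of a frame sent by this device (loopback)
 * @soc: core txrx main context
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @nbuf: pkt buffer
 *
 * Return: true if the frame is looped back and should be dropped,
 *	   false otherwise
 */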
60 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
61 struct dp_txrx_peer *txrx_peer,
62 uint8_t *rx_tlv_hdr,
63 qdf_nbuf_t nbuf)
64 {
65 struct dp_vdev *vdev = txrx_peer->vdev;
66 struct dp_pdev *pdev = vdev->pdev;
67 struct dp_mec_entry *mecentry = NULL;
68 struct dp_ast_entry *ase = NULL;
69 uint16_t sa_idx = 0;
70 uint8_t *data;
71 /*
72 * Multicast Echo Check is required only if vdev is STA and
73 * received pkt is a multicast/broadcast pkt. Otherwise,
74 * skip the MEC check.
75 */
76 if (vdev->opmode != wlan_op_mode_sta)
77 return false;
78 if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
79 return false;
80
81 data = qdf_nbuf_data(nbuf);
82
83 /*
84 * if the received pkt's src mac addr matches the vdev
85 * mac address then drop the pkt as it is looped back
86 */
87 if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
88 vdev->mac_addr.raw,
89 QDF_MAC_ADDR_SIZE)))
90 return true;
91
92 /*
93 * In case of qwrap isolation mode, do not drop loopback packets.
94 * In isolation mode, all packets from the wired stations need to go
95 * to rootap and loop back to reach the wireless stations and
96 * vice-versa.
97 */
98 if (qdf_unlikely(vdev->isolation_vdev))
99 return false;
100
101 /*
102 * If the received pkt's src mac addr matches the MAC addr of a
103 * wired PC behind the STA, or the MAC addr of a wireless STA
104 * behind the repeater, then drop the pkt as it is
105 * looped back.
106 */
107 if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
108 sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
109
110 if ((sa_idx < 0) ||
111 (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
112 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
113 "invalid sa_idx: %d", sa_idx);
114 qdf_assert_always(0);
115 }
116
117 qdf_spin_lock_bh(&soc->ast_lock);
118 ase = soc->ast_table[sa_idx];
119
120 /*
121 * This check is not strictly needed since MEC is not dependent
122 * on AST, but without it SON has issues in the dual backhaul
123 * scenario. In APS SON mode, a client connected to the RE on
124 * 2G sends multicast packets; the RE forwards them to the CAP
125 * over the 5G backhaul, and the CAP loops them back to the RE
126 * on 2G. On receiving them on the 2G STA vap, we would assume
127 * the client has roamed and kick out the client.
128 */
129 if (ase && (ase->peer_id != txrx_peer->peer_id)) {
130 qdf_spin_unlock_bh(&soc->ast_lock);
131 goto drop;
132 }
133
134 qdf_spin_unlock_bh(&soc->ast_lock);
135 }
136
137 qdf_spin_lock_bh(&soc->mec_lock);
138
139 mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
140 &data[QDF_MAC_ADDR_SIZE]);
141 if (!mecentry) {
142 qdf_spin_unlock_bh(&soc->mec_lock);
143 return false;
144 }
145
146 qdf_spin_unlock_bh(&soc->mec_lock);
147
148 drop:
149 dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
150 soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
151
152 return true;
153 }
154 #endif
155 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
156
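/**
 * dp_rx_link_desc_refill_duplicate_check() - check whether the link
 *					      descriptor being handled is a
 *					      duplicate of the last one
 * @soc: core txrx main context
 * @buf_info: last recorded link desc buf info, updated with the current one
 * @ring_buf_info: buffer address info from the current ring descriptor
 *
 * Return: None
 */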
157 void dp_rx_link_desc_refill_duplicate_check(
158 struct dp_soc *soc,
159 struct hal_buf_info *buf_info,
160 hal_buff_addrinfo_t ring_buf_info)
161 {
162 struct hal_buf_info current_link_desc_buf_info = { 0 };
163
164 /* do duplicate link desc address check */
165 hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
166 &current_link_desc_buf_info);
167
168 /*
169 * TODO - Check if the hal soc api call can be removed
170 * since the cookie is just used for print.
171 * buffer_addr_info is the first element of ring_desc
172 */
173 hal_rx_buf_cookie_rbm_get(soc->hal_soc,
174 (uint32_t *)ring_buf_info,
175 &current_link_desc_buf_info);
176
177 if (qdf_unlikely(current_link_desc_buf_info.paddr ==
178 buf_info->paddr)) {
179 dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
180 current_link_desc_buf_info.paddr,
181 current_link_desc_buf_info.sw_cookie);
182 DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
183 }
184 *buf_info = current_link_desc_buf_info;
185 }
186
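/**
 * dp_rx_link_desc_return_by_addr() - return a link descriptor to HW through
 *				      the WBM release ring (SW2WBM)
 * @soc: core txrx main context
 * @link_desc_addr: buffer address info of the link descriptor to return
 * @bm_action: buffer manager action (e.g. put in idle list)
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */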
187 QDF_STATUS
188 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
189 hal_buff_addrinfo_t link_desc_addr,
190 uint8_t bm_action)
191 {
192 struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
193 hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
194 hal_soc_handle_t hal_soc = soc->hal_soc;
195 QDF_STATUS status = QDF_STATUS_E_FAILURE;
196 void *src_srng_desc;
197
198 if (!wbm_rel_srng) {
199 dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
200 return status;
201 }
202
203 /* do duplicate link desc address check */
204 dp_rx_link_desc_refill_duplicate_check(
205 soc,
206 &soc->last_op_info.wbm_rel_link_desc,
207 link_desc_addr);
208
209 if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
210
211 /* TODO */
212 /*
213 * Need API to convert from hal_ring pointer to
214 * Ring Type / Ring Id combo
215 */
216 dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
217 soc, wbm_rel_srng);
218 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
219 goto done;
220 }
221 src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
222 if (qdf_likely(src_srng_desc)) {
223 /* Return link descriptor through WBM ring (SW2WBM)*/
224 hal_rx_msdu_link_desc_set(hal_soc,
225 src_srng_desc, link_desc_addr, bm_action);
226 status = QDF_STATUS_SUCCESS;
227 } else {
228 struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
229
230 DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);
231
232 dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
233 srng->ring_id,
234 soc->stats.rx.err.hal_ring_access_full_fail);
235 dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
236 *srng->u.src_ring.hp_addr,
237 srng->u.src_ring.reap_hp,
238 *srng->u.src_ring.tp_addr,
239 srng->u.src_ring.cached_tp);
240 QDF_BUG(0);
241 }
242 done:
243 hal_srng_access_end(hal_soc, wbm_rel_srng);
244 return status;
245
246 }
247
248 qdf_export_symbol(dp_rx_link_desc_return_by_addr);
249
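/**
 * dp_rx_link_desc_return() - return the link descriptor referenced by a REO
 *			      ring descriptor to HW
 * @soc: core txrx main context
 * @ring_desc: REO ring descriptor carrying the buffer address info
 * @bm_action: buffer manager action (e.g. put in idle list)
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */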
250 QDF_STATUS
251 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
252 uint8_t bm_action)
253 {
254 void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
255
256 return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
257 }
258
259 #ifndef QCA_HOST_MODE_WIFI_DISABLED
260
261 /**
262 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
263 *
264 * @soc: core txrx main context
265 * @ring_desc: opaque pointer to the REO error ring descriptor
266 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
267 * @mac_id: mac ID
268 * @quota: No. of units (packets) that can be serviced in one shot.
269 *
270 * This function is used to drop all MSDUs in an MPDU
271 *
272 * Return: uint32_t: No. of elements processed
273 */
274 static uint32_t
275 dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
276 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
277 uint8_t *mac_id,
278 uint32_t quota)
279 {
280 uint32_t rx_bufs_used = 0;
281 void *link_desc_va;
282 struct hal_buf_info buf_info;
283 struct dp_pdev *pdev;
284 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
285 int i;
286 uint8_t *rx_tlv_hdr;
287 uint32_t tid;
288 struct rx_desc_pool *rx_desc_pool;
289 struct dp_rx_desc *rx_desc;
290 /* First field in REO Dst ring Desc is buffer_addr_info */
291 void *buf_addr_info = ring_desc;
292 struct buffer_addr_info cur_link_desc_addr_info = { 0 };
293 struct buffer_addr_info next_link_desc_addr_info = { 0 };
294
295 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);
296
297 /* buffer_addr_info is the first element of ring_desc */
298 hal_rx_buf_cookie_rbm_get(soc->hal_soc,
299 (uint32_t *)ring_desc,
300 &buf_info);
301
302 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
303 if (!link_desc_va) {
304 dp_rx_err_debug("link desc va is null, soc %pK", soc);
305 return rx_bufs_used;
306 }
307
308 more_msdu_link_desc:
309 /* No UNMAP required -- this is "malloc_consistent" memory */
310 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
311 &mpdu_desc_info->msdu_count);
312
313 for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
314 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
315 soc, msdu_list.sw_cookie[i]);
316
317 qdf_assert_always(rx_desc);
318
319 /* all buffers from an MSDU link desc belong to the same pdev */
320 *mac_id = rx_desc->pool_id;
321 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
322 if (!pdev) {
323 dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
324 soc, rx_desc->pool_id);
325 return rx_bufs_used;
326 }
327
328 if (!dp_rx_desc_check_magic(rx_desc)) {
329 dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
330 soc, msdu_list.sw_cookie[i]);
331 return rx_bufs_used;
332 }
333
334 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
335 dp_ipa_rx_buf_smmu_mapping_lock(soc);
336 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
337 rx_desc->unmapped = 1;
338 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
339
340 rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
341
342 rx_bufs_used++;
343 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
344 rx_desc->rx_buf_start);
345 dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
346 soc, tid);
347
348 rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
349 if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
350 hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);
351
352 dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
353 rx_desc->nbuf,
354 QDF_TX_RX_STATUS_DROP, true);
355 /* Just free the buffers */
356 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);
357
358 dp_rx_add_to_free_desc_list(&pdev->free_list_head,
359 &pdev->free_list_tail, rx_desc);
360 }
361
362 /*
363 * If the msdus are spread across multiple link descriptors,
364 * we cannot depend solely on the msdu_count (e.g., if an msdu is
365 * spread across multiple buffers). Hence, it is
366 * necessary to check the next link descriptor and release
367 * all the msdus that are part of it.
368 */
369 hal_rx_get_next_msdu_link_desc_buf_addr_info(
370 link_desc_va,
371 &next_link_desc_addr_info);
372
373 if (hal_rx_is_buf_addr_info_valid(
374 &next_link_desc_addr_info)) {
375 /* Clear the next link desc info for the current link_desc */
376 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
377
378 dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
379 HAL_BM_ACTION_PUT_IN_IDLE_LIST);
380 hal_rx_buffer_addr_info_get_paddr(
381 &next_link_desc_addr_info,
382 &buf_info);
383 /* buffer_addr_info is the first element of ring_desc */
384 hal_rx_buf_cookie_rbm_get(soc->hal_soc,
385 (uint32_t *)&next_link_desc_addr_info,
386 &buf_info);
387 cur_link_desc_addr_info = next_link_desc_addr_info;
388 buf_addr_info = &cur_link_desc_addr_info;
389
390 link_desc_va =
391 dp_rx_cookie_2_link_desc_va(soc, &buf_info);
392
393 goto more_msdu_link_desc;
394 }
395 quota--;
396 dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
397 HAL_BM_ACTION_PUT_IN_IDLE_LIST);
398 return rx_bufs_used;
399 }
400
401 /**
402 * dp_rx_pn_error_handle() - Handles PN check errors
403 *
404 * @soc: core txrx main context
405 * @ring_desc: opaque pointer to the REO error ring descriptor
406 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
407 * @mac_id: mac ID
408 * @quota: No. of units (packets) that can be serviced in one shot.
409 *
410 * This function implements PN error handling.
411 * If the peer is configured to ignore PN check errors, or if DP
412 * determines that this frame is still OK, the frame can be
413 * re-injected back to REO to use some of the other features
414 * of REO, e.g. duplicate detection/routing to other cores.
415 *
416 * Return: uint32_t: No. of elements processed
417 */
418 static uint32_t
419 dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
420 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
421 uint8_t *mac_id,
422 uint32_t quota)
423 {
424 uint16_t peer_id;
425 uint32_t rx_bufs_used = 0;
426 struct dp_txrx_peer *txrx_peer;
427 bool peer_pn_policy = false;
428 dp_txrx_ref_handle txrx_ref_handle = NULL;
429
430 peer_id = dp_rx_peer_metadata_peer_id_get(soc,
431 mpdu_desc_info->peer_meta_data);
432
433
434 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
435 &txrx_ref_handle,
436 DP_MOD_ID_RX_ERR);
437
438 if (qdf_likely(txrx_peer)) {
439 /*
440 * TODO: Check for peer specific policies & set peer_pn_policy
441 */
442 dp_err_rl("discard rx due to PN error for peer %pK",
443 txrx_peer);
444
445 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
446 }
447 dp_rx_err_err("%pK: Packet received with PN error", soc);
448
449 /* No peer PN policy -- definitely drop */
450 if (!peer_pn_policy)
451 rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
452 mpdu_desc_info,
453 mac_id, quota);
454
455 return rx_bufs_used;
456 }
457
458 #ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
459 /**
460 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
461 * @soc: Datapath soc handler
462 * @txrx_peer: pointer to DP peer
463 * @nbuf: pointer to the skb of RX frame
464 * @frame_mask: the mask for special frame needed
465 * @rx_tlv_hdr: start of rx tlv header
466 *
467 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
468 * a single nbuf is expected.
469 *
470 * Return: true - nbuf has been delivered to stack, false - not.
471 */
472 static bool
473 dp_rx_deliver_oor_frame(struct dp_soc *soc,
474 struct dp_txrx_peer *txrx_peer,
475 qdf_nbuf_t nbuf, uint32_t frame_mask,
476 uint8_t *rx_tlv_hdr)
477 {
478 uint32_t l2_hdr_offset = 0;
479 uint16_t msdu_len = 0;
480 uint32_t skip_len;
481
482 l2_hdr_offset =
483 hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
484
485 if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
486 skip_len = l2_hdr_offset;
487 } else {
488 msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
489 skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
490 qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
491 }
492
493 QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
494 dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
495 qdf_nbuf_pull_head(nbuf, skip_len);
496 qdf_nbuf_set_exc_frame(nbuf, 1);
497
498 dp_info_rl("OOR frame, mpdu sn 0x%x",
499 hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
500 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
501 return true;
502 }
503
504 #else
505 static bool
506 dp_rx_deliver_oor_frame(struct dp_soc *soc,
507 struct dp_txrx_peer *txrx_peer,
508 qdf_nbuf_t nbuf, uint32_t frame_mask,
509 uint8_t *rx_tlv_hdr)
510 {
511 return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
512 rx_tlv_hdr);
513 }
514 #endif
515
516 /**
517 * dp_rx_oor_handle() - Handles the msdu which is OOR error
518 *
519 * @soc: core txrx main context
520 * @nbuf: pointer to msdu skb
521 * @peer_id: dp peer ID
522 * @rx_tlv_hdr: start of rx tlv header
523 *
524 * This function processes the msdu delivered from the REO2TCL
525 * ring with error type OOR.
526 *
527 * Return: None
528 */
529 static void
530 dp_rx_oor_handle(struct dp_soc *soc,
531 qdf_nbuf_t nbuf,
532 uint16_t peer_id,
533 uint8_t *rx_tlv_hdr)
534 {
535 uint32_t frame_mask = wlan_cfg_get_special_frame_cfg(soc->wlan_cfg_ctx);
536
537 struct dp_txrx_peer *txrx_peer = NULL;
538 dp_txrx_ref_handle txrx_ref_handle = NULL;
539
540 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
541 &txrx_ref_handle,
542 DP_MOD_ID_RX_ERR);
543 if (!txrx_peer) {
544 dp_info_rl("peer not found");
545 goto free_nbuf;
546 }
547
548 if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
549 rx_tlv_hdr)) {
550 DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
551 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
552 return;
553 }
554
555 free_nbuf:
556 if (txrx_peer)
557 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
558
559 DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
560 dp_rx_nbuf_free(nbuf);
561 }
562
563 /**
564 * dp_rx_err_nbuf_pn_check() - Check if the PN of the current packet
565 * is a monotonic increment of the packet number
566 * from the previous successfully re-ordered
567 * frame.
568 * @soc: Datapath SOC handle
569 * @ring_desc: REO ring descriptor
570 * @nbuf: Current packet
571 *
572 * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
573 */
574 static inline QDF_STATUS
575 dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
576 qdf_nbuf_t nbuf)
577 {
578 uint64_t prev_pn, curr_pn[2];
579
580 if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
581 return QDF_STATUS_SUCCESS;
582
583 hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
584 hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);
585
586 if (curr_pn[0] > prev_pn)
587 return QDF_STATUS_SUCCESS;
588
589 return QDF_STATUS_E_FAILURE;
590 }
591
592 #ifdef WLAN_SKIP_BAR_UPDATE
593 static
594 void dp_rx_err_handle_bar(struct dp_soc *soc,
595 struct dp_peer *peer,
596 qdf_nbuf_t nbuf)
597 {
598 dp_info_rl("BAR update to H.W is skipped");
599 DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
600 }
601 #else
602 static
603 void dp_rx_err_handle_bar(struct dp_soc *soc,
604 struct dp_peer *peer,
605 qdf_nbuf_t nbuf)
606 {
607 uint8_t *rx_tlv_hdr;
608 unsigned char type, subtype;
609 uint16_t start_seq_num;
610 uint32_t tid;
611 QDF_STATUS status;
612 struct ieee80211_frame_bar *bar;
613
614 /*
615 * 1. Is this a BAR frame? If not, discard it.
616 * 2. If it is, get the peer id, tid and ssn.
617 * 2a. Do a tid update.
618 */
619
620 rx_tlv_hdr = qdf_nbuf_data(nbuf);
621 bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);
622
623 type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
624 subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
625
626 if (!(type == IEEE80211_FC0_TYPE_CTL &&
627 subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
628 dp_err_rl("Not a BAR frame!");
629 return;
630 }
631
632 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
633 qdf_assert_always(tid < DP_MAX_TIDS);
634
635 start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
636
637 dp_info_rl("tid %u window_size %u start_seq_num %u",
638 tid, peer->rx_tid[tid].ba_win_size, start_seq_num);
639
640 status = dp_rx_tid_update_wifi3(peer, tid,
641 peer->rx_tid[tid].ba_win_size,
642 start_seq_num,
643 true);
644 if (status != QDF_STATUS_SUCCESS) {
645 dp_err_rl("failed to handle bar frame update rx tid");
646 DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
647 } else {
648 DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
649 }
650 }
651 #endif
652
653 /**
654 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
655 * @soc: Datapath SoC handle
656 * @nbuf: packet being processed
657 * @mpdu_desc_info: mpdu desc info for the current packet
658 * @tid: tid on which the packet arrived
659 * @err_status: Flag to indicate if REO encountered an error while routing this
660 * frame
661 * @error_code: REO error code
662 *
663 * Return: None
664 */
665 static void
666 _dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
667 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
668 uint32_t tid, uint8_t err_status, uint32_t error_code)
669 {
670 uint16_t peer_id;
671 struct dp_peer *peer;
672
673 peer_id = dp_rx_peer_metadata_peer_id_get(soc,
674 mpdu_desc_info->peer_meta_data);
675 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
676 if (!peer)
677 return;
678
679 dp_info_rl("BAR frame: "
680 " peer_id = %d"
681 " tid = %u"
682 " SSN = %d"
683 " error status = %d",
684 peer->peer_id,
685 tid,
686 mpdu_desc_info->mpdu_seq,
687 err_status);
688
689 if (err_status == HAL_REO_ERROR_DETECTED) {
690 switch (error_code) {
691 case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
692 case HAL_REO_ERR_BAR_FRAME_OOR:
693 dp_rx_err_handle_bar(soc, peer, nbuf);
694 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
695 break;
696 default:
697 DP_STATS_INC(soc, rx.bar_frame, 1);
698 }
699 }
700
701 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
702 }
703
704 /**
705 * dp_rx_bar_frame_handle() - Function to handle err BAR frames
706 * @soc: core DP main context
707 * @ring_desc: Hal ring desc
708 * @rx_desc: dp rx desc
709 * @mpdu_desc_info: mpdu desc info
710 * @err_status: error status
711 * @err_code: error code
712 *
713 * Handle the error BAR frames received. Ensure the SOC level
714 * stats are updated based on the REO error code. The BAR frames
715 * are further processed by updating the Rx tids with the start
716 * sequence number (SSN) and BA window size. Desc is returned
717 * to the free desc list
718 *
719 * Return: none
720 */
721 static void
722 dp_rx_bar_frame_handle(struct dp_soc *soc,
723 hal_ring_desc_t ring_desc,
724 struct dp_rx_desc *rx_desc,
725 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
726 uint8_t err_status,
727 uint32_t err_code)
728 {
729 qdf_nbuf_t nbuf;
730 struct dp_pdev *pdev;
731 struct rx_desc_pool *rx_desc_pool;
732 uint8_t *rx_tlv_hdr;
733 uint32_t tid;
734
735 nbuf = rx_desc->nbuf;
736 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
737 dp_ipa_rx_buf_smmu_mapping_lock(soc);
738 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
739 rx_desc->unmapped = 1;
740 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
741 rx_tlv_hdr = qdf_nbuf_data(nbuf);
742 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
743 rx_tlv_hdr);
744 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
745
746 if (!pdev) {
747 dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
748 soc, rx_desc->pool_id);
749 return;
750 }
751
752 _dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
753 err_code);
754 dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
755 QDF_TX_RX_STATUS_DROP, true);
756 dp_rx_link_desc_return(soc, ring_desc,
757 HAL_BM_ACTION_PUT_IN_IDLE_LIST);
758 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
759 rx_desc->pool_id);
760 dp_rx_add_to_free_desc_list(&pdev->free_list_head,
761 &pdev->free_list_tail,
762 rx_desc);
763 }
764
765 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
766
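/**
 * dp_2k_jump_handle() - handle a 2K-jump error by sending a DELBA when a BA
 *			 session is active and delivering eligible special
 *			 frames to the stack
 * @soc: core txrx main context
 * @nbuf: packet buffer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of the frame
 * @tid: tid of the frame
 *
 * Return: None
 */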
767 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
768 uint16_t peer_id, uint8_t tid)
769 {
770 struct dp_peer *peer = NULL;
771 struct dp_rx_tid *rx_tid = NULL;
772 struct dp_txrx_peer *txrx_peer;
773 uint32_t frame_mask = FRAME_MASK_IPV4_ARP;
774
775 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
776 if (!peer) {
777 dp_rx_err_info_rl("%pK: peer not found", soc);
778 goto free_nbuf;
779 }
780
781 txrx_peer = dp_get_txrx_peer(peer);
782 if (!txrx_peer) {
783 dp_rx_err_info_rl("%pK: txrx_peer not found", soc);
784 goto free_nbuf;
785 }
786
787 if (tid >= DP_MAX_TIDS) {
788 dp_info_rl("invalid tid");
789 goto nbuf_deliver;
790 }
791
792 rx_tid = &peer->rx_tid[tid];
793 qdf_spin_lock_bh(&rx_tid->tid_lock);
794
795 /* allow sending a DELBA only if the BA session is active */
796 if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
797 qdf_spin_unlock_bh(&rx_tid->tid_lock);
798 goto nbuf_deliver;
799 }
800
801 if (!rx_tid->delba_tx_status) {
802 rx_tid->delba_tx_retry++;
803 rx_tid->delba_tx_status = 1;
804 rx_tid->delba_rcode =
805 IEEE80211_REASON_QOS_SETUP_REQUIRED;
806 qdf_spin_unlock_bh(&rx_tid->tid_lock);
807 if (soc->cdp_soc.ol_ops->send_delba) {
808 DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
809 1);
810 soc->cdp_soc.ol_ops->send_delba(
811 peer->vdev->pdev->soc->ctrl_psoc,
812 peer->vdev->vdev_id,
813 peer->mac_addr.raw,
814 tid,
815 rx_tid->delba_rcode,
816 CDP_DELBA_2K_JUMP);
817 }
818 } else {
819 qdf_spin_unlock_bh(&rx_tid->tid_lock);
820 }
821
822 nbuf_deliver:
823 if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
824 rx_tlv_hdr)) {
825 DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
826 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
827 return;
828 }
829
830 free_nbuf:
831 if (peer)
832 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
833 DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
834 dp_rx_nbuf_free(nbuf);
835 }
836
837 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
838 defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
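/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - WAR for null-queue frames
 *						     with a corrupted peer id
 * @soc: core txrx main context
 * @pool_id: rx desc pool id of the buffer
 * @rx_tlv_hdr: start of rx tlv header
 * @nbuf: packet buffer
 *
 * Try to find the peer by addr_2 of the received MPDU; if found, the frame is
 * treated as having a corrupted sw_peer_id/ast_idx and is dropped here.
 *
 * Return: true if the frame was consumed (dropped), false otherwise
 */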
839 bool
840 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
841 uint8_t pool_id,
842 uint8_t *rx_tlv_hdr,
843 qdf_nbuf_t nbuf)
844 {
845 struct dp_peer *peer = NULL;
846 uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
847 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
848 struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;
849
850 if (!pdev) {
851 dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
852 soc, pool_id);
853 return false;
854 }
855 /*
856 * WAR: In certain types of packets, if the peer_id is not correct, the
857 * driver may not be able to find the peer. Try finding the peer by
858 * addr_2 of the received MPDU.
859 */
860 if (wh)
861 peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
862 DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
863 if (peer) {
864 dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
865 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
866 QDF_TRACE_LEVEL_DEBUG);
867 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
868 1, qdf_nbuf_len(nbuf));
869 dp_rx_nbuf_free(nbuf);
870
871 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
872 return true;
873 }
874 return false;
875 }
876 #else
877 bool
878 dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
879 uint8_t pool_id,
880 uint8_t *rx_tlv_hdr,
881 qdf_nbuf_t nbuf)
882 {
883 return false;
884 }
885 #endif
886
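/**
 * dp_rx_check_pkt_len() - check whether the packet length exceeds the
 *			   configured rx buffer size
 * @soc: core txrx main context
 * @pkt_len: computed packet length
 *
 * Return: true if the length is invalid (too large), false otherwise
 */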
887 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
888 {
889 uint16_t buf_size;
890
891 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
892
893 if (qdf_unlikely(pkt_len > buf_size)) {
894 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
895 1, pkt_len);
896 return true;
897 } else {
898 return false;
899 }
900 }
901
902 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
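/**
 * dp_rx_deliver_to_osif_stack() - deliver the frame to the OS interface,
 *				   using the EAPOL-over-control-port path
 *				   when applicable
 * @soc: core txrx main context
 * @vdev: vdev on which the frame was received
 * @txrx_peer: txrx peer handle
 * @nbuf: packet buffer
 * @tail: tail of the nbuf list (unused in this path)
 * @is_eapol: true if the frame is an EAPOL frame
 *
 * Return: None
 */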
903 void
904 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
905 struct dp_vdev *vdev,
906 struct dp_txrx_peer *txrx_peer,
907 qdf_nbuf_t nbuf,
908 qdf_nbuf_t tail,
909 bool is_eapol)
910 {
911 if (is_eapol && soc->eapol_over_control_port)
912 dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
913 else
914 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
915 }
916 #else
917 void
918 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
919 struct dp_vdev *vdev,
920 struct dp_txrx_peer *txrx_peer,
921 qdf_nbuf_t nbuf,
922 qdf_nbuf_t tail,
923 bool is_eapol)
924 {
925 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
926 }
927 #endif
928
929 #ifdef WLAN_FEATURE_11BE_MLO
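/**
 * dp_rx_err_match_dhost() - check if the destination address of the frame
 *			     matches the vdev MAC (or the MLD MAC for MLO)
 * @eh: ethernet header of the received frame
 * @vdev: vdev on which the frame was received
 *
 * Return: non-zero if the destination address matches, 0 otherwise
 */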
930 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
931 {
932 return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
933 QDF_MAC_ADDR_SIZE) == 0) ||
934 (qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
935 QDF_MAC_ADDR_SIZE) == 0));
936 }
937
938 #else
939 int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
940 {
941 return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
942 QDF_MAC_ADDR_SIZE) == 0);
943 }
944 #endif
945
946 #ifndef QCA_HOST_MODE_WIFI_DISABLED
947
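/**
 * dp_rx_err_drop_3addr_mcast() - check if a multicast frame received on a
 *				  STA vdev with drop_3addr_mcast set should
 *				  be dropped
 * @vdev: vdev on which the frame was received
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: true if the frame should be dropped, false otherwise
 */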
948 bool
949 dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
950 {
951 struct dp_soc *soc = vdev->pdev->soc;
952
953 if (!vdev->drop_3addr_mcast)
954 return false;
955
956 if (vdev->opmode != wlan_op_mode_sta)
957 return false;
958
959 if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
960 return true;
961
962 return false;
963 }
964
965 /**
966 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
967 * for this frame received in REO error ring.
968 * @soc: Datapath SOC handle
969 * @error: REO error detected or not
970 * @error_code: Error code in case of REO error
971 *
972 * Return: true if the pn check is needed in software,
973 * false if the pn check is not needed.
974 */
975 static inline bool
976 dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
977 uint32_t error_code)
978 {
979 return (soc->features.pn_in_reo_dest &&
980 (error == HAL_REO_ERROR_DETECTED &&
981 (hal_rx_reo_is_2k_jump(error_code) ||
982 hal_rx_reo_is_oor_error(error_code) ||
983 hal_rx_reo_is_bar_oor_2k_jump(error_code))));
984 }
985
986 #ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
987 static inline void
988 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
989 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
990 bool first_msdu_in_mpdu_processed)
991 {
992 if (first_msdu_in_mpdu_processed) {
993 /*
994 * This is the 2nd indication of first_msdu in the same mpdu.
995 * Skip re-parsing the mpdu_desc_info and use the cached one,
996 * since this msdu is most probably from the current mpdu
997 * which is being processed
998 */
999 } else {
1000 hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
1001 qdf_nbuf_data(nbuf),
1002 mpdu_desc_info);
1003 }
1004 }
1005 #else
1006 static inline void
1007 dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
1008 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1009 bool first_msdu_in_mpdu_processed)
1010 {
1011 hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
1012 mpdu_desc_info);
1013 }
1014 #endif
1015
1016 /**
1017 * dp_rx_reo_err_entry_process() - Handles for REO error entry processing
1018 *
1019 * @soc: core txrx main context
1020 * @ring_desc: opaque pointer to the REO error ring descriptor
1021 * @mpdu_desc_info: pointer to mpdu level description info
1022 * @link_desc_va: pointer to msdu_link_desc virtual address
1023 * @err_code: reo error code fetched from ring entry
1024 *
1025 * Function to handle msdus fetched from the msdu link desc; currently
1026 * supports the REO errors NULL queue, 2K jump and OOR.
1027 *
1028 * Return: msdu count processed
1029 */
1030 static uint32_t
1031 dp_rx_reo_err_entry_process(struct dp_soc *soc,
1032 void *ring_desc,
1033 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
1034 void *link_desc_va,
1035 enum hal_reo_error_code err_code)
1036 {
1037 uint32_t rx_bufs_used = 0;
1038 struct dp_pdev *pdev;
1039 int i;
1040 uint8_t *rx_tlv_hdr_first;
1041 uint8_t *rx_tlv_hdr_last;
1042 uint32_t tid = DP_MAX_TIDS;
1043 uint16_t peer_id;
1044 struct dp_rx_desc *rx_desc;
1045 struct rx_desc_pool *rx_desc_pool;
1046 qdf_nbuf_t nbuf;
1047 qdf_nbuf_t next_nbuf;
1048 struct hal_buf_info buf_info;
1049 struct hal_rx_msdu_list msdu_list;
1050 uint16_t num_msdus;
1051 struct buffer_addr_info cur_link_desc_addr_info = { 0 };
1052 struct buffer_addr_info next_link_desc_addr_info = { 0 };
1053 /* First field in REO Dst ring Desc is buffer_addr_info */
1054 void *buf_addr_info = ring_desc;
1055 qdf_nbuf_t head_nbuf = NULL;
1056 qdf_nbuf_t tail_nbuf = NULL;
1057 uint16_t msdu_processed = 0;
1058 QDF_STATUS status;
1059 bool ret, is_pn_check_needed;
1060 uint8_t rx_desc_pool_id;
1061 struct dp_txrx_peer *txrx_peer = NULL;
1062 dp_txrx_ref_handle txrx_ref_handle = NULL;
1063 hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
1064 bool first_msdu_in_mpdu_processed = false;
1065 bool msdu_dropped = false;
1066 uint8_t link_id = 0;
1067
1068 peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1069 mpdu_desc_info->peer_meta_data);
1070 is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
1071 HAL_REO_ERROR_DETECTED,
1072 err_code);
1073 more_msdu_link_desc:
1074 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
1075 &num_msdus);
1076 for (i = 0; i < num_msdus; i++) {
1077 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
1078 soc,
1079 msdu_list.sw_cookie[i]);
1080
1081 if (dp_assert_always_internal_stat(rx_desc, soc,
1082 rx.err.reo_err_rx_desc_null))
1083 continue;
1084
1085 nbuf = rx_desc->nbuf;
1086
1087 /*
1088 * This is an unlikely scenario where the host is reaping
1089 * a descriptor which it already reaped just a while ago
1090 * but has not yet replenished back to HW.
1091 * In this case host will dump the last 128 descriptors
1092 * including the software descriptor rx_desc and assert.
1093 */
1094 if (qdf_unlikely(!rx_desc->in_use) ||
1095 qdf_unlikely(!nbuf)) {
1096 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1097 dp_info_rl("Reaping rx_desc not in use!");
1098 dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1099 ring_desc, rx_desc);
1100 /* ignore duplicate RX desc and continue to process */
1101 /* Pop out the descriptor */
1102 msdu_dropped = true;
1103 continue;
1104 }
1105
1106 ret = dp_rx_desc_paddr_sanity_check(rx_desc,
1107 msdu_list.paddr[i]);
1108 if (!ret) {
1109 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1110 rx_desc->in_err_state = 1;
1111 msdu_dropped = true;
1112 continue;
1113 }
1114
1115 rx_desc_pool_id = rx_desc->pool_id;
1116 /* all buffers from a MSDU link belong to same pdev */
1117 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);
1118
1119 rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
1120 dp_ipa_rx_buf_smmu_mapping_lock(soc);
1121 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1122 rx_desc->unmapped = 1;
1123 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1124
1125 QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
1126 rx_bufs_used++;
1127 dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1128 &pdev->free_list_tail, rx_desc);
1129
1130 DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);
1131
1132 if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
1133 HAL_MSDU_F_MSDU_CONTINUATION)) {
1134 qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
1135 continue;
1136 }
1137
1138 if (dp_rx_buffer_pool_refill(soc, head_nbuf,
1139 rx_desc_pool_id)) {
1140 /* MSDU queued back to the pool */
1141 msdu_dropped = true;
1142 head_nbuf = NULL;
1143 goto process_next_msdu;
1144 }
1145
1146 if (is_pn_check_needed) {
1147 if (msdu_list.msdu_info[i].msdu_flags &
1148 HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
1149 dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
1150 mpdu_desc_info,
1151 first_msdu_in_mpdu_processed);
1152 first_msdu_in_mpdu_processed = true;
1153 } else {
1154 if (!first_msdu_in_mpdu_processed) {
1155 /*
1156 * If no msdu in this mpdu was dropped
1157 * due to failed sanity checks, then
1158 * it is not expected to hit this
1159 * condition. Hence we assert here.
1160 */
1161 if (!msdu_dropped)
1162 qdf_assert_always(0);
1163
1164 /*
1165 * We do not have valid mpdu_desc_info
1166 * to process this nbuf, hence drop it.
1167 * TODO - Increment stats
1168 */
1169 goto process_next_msdu;
1170 }
1171 /*
1172 * DO NOTHING -
1173 * Continue using the same mpdu_desc_info
1174 * details populated from the first msdu in
1175 * the mpdu.
1176 */
1177 }
1178
1179 status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
1180 if (QDF_IS_STATUS_ERROR(status)) {
1181 DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
1182 1);
1183 goto process_next_msdu;
1184 }
1185
1186 peer_id = dp_rx_peer_metadata_peer_id_get(soc,
1187 mpdu_desc_info->peer_meta_data);
1188
1189 if (mpdu_desc_info->bar_frame)
1190 _dp_rx_bar_frame_handle(soc, nbuf,
1191 mpdu_desc_info, tid,
1192 HAL_REO_ERROR_DETECTED,
1193 err_code);
1194 }
1195
1196 rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
1197 rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);
1198
1199 if (qdf_unlikely(head_nbuf != tail_nbuf)) {
1200 /*
1201 * For the SG case, only the length of the last skb is valid,
1202 * as HW only populates the msdu_len for the last msdu
1203 * in the rx link descriptor; use the length from
1204 * the last skb to overwrite the head skb for further
1205 * SG processing.
1206 */
1207 QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) =
1208 QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf);
1209 nbuf = dp_rx_sg_create(soc, head_nbuf);
1210 qdf_nbuf_set_is_frag(nbuf, 1);
1211 DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
1212 }
1213 head_nbuf = NULL;
1214
1215 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
1216 soc, peer_id,
1217 &txrx_ref_handle,
1218 DP_MOD_ID_RX_ERR);
1219 if (!txrx_peer)
1220 dp_info_rl("txrx_peer is null peer_id %u",
1221 peer_id);
1222
1223 dp_rx_nbuf_set_link_id_from_tlv(soc, qdf_nbuf_data(nbuf), nbuf);
1224
1225 if (pdev && pdev->link_peer_stats &&
1226 txrx_peer && txrx_peer->is_mld_peer) {
1227 link_id = dp_rx_get_stats_arr_idx_from_link_id(
1228 nbuf,
1229 txrx_peer);
1230 }
1231
1232 if (txrx_peer)
1233 dp_rx_set_nbuf_band(nbuf, txrx_peer, link_id);
1234
1235 switch (err_code) {
1236 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
1237 case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
1238 case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
1239 /*
1240 * The mpdu-start TLV is valid only on the first msdu;
1241 * use it for the following msdus as well.
1242 */
1243 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1244 rx_tlv_hdr_last))
1245 tid = hal_rx_mpdu_start_tid_get(
1246 soc->hal_soc,
1247 rx_tlv_hdr_first);
1248
1249 dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
1250 peer_id, tid);
1251 break;
1252 case HAL_REO_ERR_REGULAR_FRAME_OOR:
1253 case HAL_REO_ERR_BAR_FRAME_OOR:
1254 dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
1255 break;
1256 case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
1257 soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
1258 rx_tlv_hdr_last,
1259 rx_desc_pool_id,
1260 txrx_peer,
1261 TRUE,
1262 link_id);
1263 break;
1264 default:
1265 dp_err_rl("Non-support error code %d", err_code);
1266 dp_rx_nbuf_free(nbuf);
1267 }
1268
1269 if (txrx_peer)
1270 dp_txrx_peer_unref_delete(txrx_ref_handle,
1271 DP_MOD_ID_RX_ERR);
1272 process_next_msdu:
1273 nbuf = head_nbuf;
1274 while (nbuf) {
1275 next_nbuf = qdf_nbuf_next(nbuf);
1276 dp_rx_nbuf_free(nbuf);
1277 nbuf = next_nbuf;
1278 }
1279 msdu_processed++;
1280 head_nbuf = NULL;
1281 tail_nbuf = NULL;
1282 }
1283
1284 /*
1285 * If the msdus are spread across multiple link descriptors,
1286 * we cannot depend solely on the msdu_count (e.g., if an msdu is
1287 * spread across multiple buffers). Hence, it is
1288 * necessary to check the next link descriptor and release
1289 * all the msdus that are part of it.
1290 */
1291 hal_rx_get_next_msdu_link_desc_buf_addr_info(
1292 link_desc_va,
1293 &next_link_desc_addr_info);
1294
1295 if (hal_rx_is_buf_addr_info_valid(
1296 &next_link_desc_addr_info)) {
1297 /* Clear the next link desc info for the current link_desc */
1298 hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
1299 dp_rx_link_desc_return_by_addr(
1300 soc,
1301 buf_addr_info,
1302 HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1303
1304 hal_rx_buffer_addr_info_get_paddr(
1305 &next_link_desc_addr_info,
1306 &buf_info);
1307 /* buffer_addr_info is the first element of ring_desc */
1308 hal_rx_buf_cookie_rbm_get(soc->hal_soc,
1309 (uint32_t *)&next_link_desc_addr_info,
1310 &buf_info);
1311 link_desc_va =
1312 dp_rx_cookie_2_link_desc_va(soc, &buf_info);
1313 cur_link_desc_addr_info = next_link_desc_addr_info;
1314 buf_addr_info = &cur_link_desc_addr_info;
1315
1316 goto more_msdu_link_desc;
1317 }
1318
1319 dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
1320 HAL_BM_ACTION_PUT_IN_IDLE_LIST);
1321 if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
1322 DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);
1323
1324 return rx_bufs_used;
1325 }
1326
1327 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1328
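/**
 * dp_rx_process_rxdma_err() - process a frame received on the RXDMA error
 *			       path and deliver it to the stack if permitted
 * @soc: core txrx main context
 * @nbuf: packet buffer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 * @err_code: RXDMA error code
 * @mac_id: mac id on which the frame was received
 * @link_id: link id on which the frame was received
 *
 * Return: None
 */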
1329 void
1330 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1331 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
1332 uint8_t err_code, uint8_t mac_id, uint8_t link_id)
1333 {
1334 uint32_t pkt_len, l2_hdr_offset;
1335 uint16_t msdu_len;
1336 struct dp_vdev *vdev;
1337 qdf_ether_header_t *eh;
1338 bool is_broadcast;
1339
1340 /*
1341 * Check if DMA completed -- msdu_done is the last bit
1342 * to be written
1343 */
1344 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1345
1346 dp_err_rl("MSDU DONE failure");
1347
1348 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1349 QDF_TRACE_LEVEL_INFO);
1350 qdf_assert(0);
1351 }
1352
1353 l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
1354 rx_tlv_hdr);
1355 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1356 pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1357
1358 if (dp_rx_check_pkt_len(soc, pkt_len)) {
1359 /* Drop & free packet */
1360 dp_rx_nbuf_free(nbuf);
1361 return;
1362 }
1363 /* Set length in nbuf */
1364 qdf_nbuf_set_pktlen(nbuf, pkt_len);
1365
1366 qdf_nbuf_set_next(nbuf, NULL);
1367
1368 qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1369 qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1370
1371 if (!txrx_peer) {
1372 QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
1373 DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1374 qdf_nbuf_len(nbuf));
1375 /* Trigger invalid peer handler wrapper */
1376 dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
1377 return;
1378 }
1379
1380 vdev = txrx_peer->vdev;
1381 if (!vdev) {
1382 dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
1383 vdev);
1384 /* Drop & free packet */
1385 dp_rx_nbuf_free(nbuf);
1386 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1387 return;
1388 }
1389
1390 /*
1391 * Advance the packet start pointer by total size of
1392 * pre-header TLV's
1393 */
1394 dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
1395
1396 if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
1397 uint8_t *pkt_type;
1398
1399 pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
1400 if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
1401 if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
1402 htons(QDF_LLC_STP)) {
1403 DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
1404 goto process_mesh;
1405 } else {
1406 goto process_rx;
1407 }
1408 }
1409 }
1410 if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
1411 goto process_mesh;
1412
1413 /*
1414 * WAPI cert AP sends rekey frames as unencrypted.
1415 * Thus RXDMA will report unencrypted frame error.
1416 * To pass WAPI cert case, SW needs to pass unencrypted
1417 * rekey frame to stack.
1418 */
1419 if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1420 goto process_rx;
1421 }
1422 /*
1423 * In the dynamic WEP case, rekey frames are not encrypted,
1424 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
1425 * the key install is already done.
1426 */
1427 if ((vdev->sec_type == cdp_sec_type_wep104) &&
1428 (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
1429 goto process_rx;
1430
1431 process_mesh:
1432
1433 if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
1434 dp_rx_nbuf_free(nbuf);
1435 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1436 return;
1437 }
1438
1439 if (vdev->mesh_vdev) {
1440 if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
1441 == QDF_STATUS_SUCCESS) {
1442 dp_rx_err_info("%pK: mesh pkt filtered", soc);
1443 DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
1444
1445 dp_rx_nbuf_free(nbuf);
1446 return;
1447 }
1448 dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
1449 }
1450 process_rx:
1451 if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1452 rx_tlv_hdr) &&
1453 (vdev->rx_decap_type ==
1454 htt_cmn_pkt_type_ethernet))) {
1455 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1456 is_broadcast = (QDF_IS_ADDR_BROADCAST
1457 (eh->ether_dhost)) ? 1 : 0;
1458 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
1459 qdf_nbuf_len(nbuf), link_id);
1460 if (is_broadcast) {
1461 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
1462 qdf_nbuf_len(nbuf),
1463 link_id);
1464 }
1465 } else {
1466 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
1467 qdf_nbuf_len(nbuf),
1468 link_id);
1469 }
1470
1471 if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
1472 dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
1473 } else {
1474 /* Update the protocol tag in SKB based on CCE metadata */
1475 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1476 EXCEPTION_DEST_RING_ID, true, true);
1477 /* Update the flow tag in SKB based on FSE metadata */
1478 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
1479 DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
1480 qdf_nbuf_set_exc_frame(nbuf, 1);
1481 dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
1482 qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
1483 }
1484
1485 return;
1486 }
1487
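/**
 * dp_rx_process_mic_error() - handle an rx MIC failure by notifying the
 *			       control path, or by queueing the last fragment
 *			       for defrag in the raw decap case
 * @soc: core txrx main context
 * @nbuf: packet buffer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 *
 * Return: None
 */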
1488 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1489 uint8_t *rx_tlv_hdr,
1490 struct dp_txrx_peer *txrx_peer)
1491 {
1492 struct dp_vdev *vdev = NULL;
1493 struct dp_pdev *pdev = NULL;
1494 struct ol_if_ops *tops = NULL;
1495 uint16_t rx_seq, fragno;
1496 uint8_t is_raw;
1497 unsigned int tid;
1498 QDF_STATUS status;
1499 struct cdp_rx_mic_err_info mic_failure_info;
1500
1501 if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1502 rx_tlv_hdr))
1503 return;
1504
1505 if (!txrx_peer) {
1506 dp_info_rl("txrx_peer not found");
1507 goto fail;
1508 }
1509
1510 vdev = txrx_peer->vdev;
1511 if (!vdev) {
1512 dp_info_rl("VDEV not found");
1513 goto fail;
1514 }
1515
1516 pdev = vdev->pdev;
1517 if (!pdev) {
1518 dp_info_rl("PDEV not found");
1519 goto fail;
1520 }
1521
1522 is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
1523 if (is_raw) {
1524 fragno = dp_rx_frag_get_mpdu_frag_number(soc,
1525 qdf_nbuf_data(nbuf));
1526 /* Can get only last fragment */
1527 if (fragno) {
1528 tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
1529 qdf_nbuf_data(nbuf));
1530 rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
1531 qdf_nbuf_data(nbuf));
1532
1533 status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
1534 tid, rx_seq, nbuf);
1535 dp_info_rl("Frag pkt seq# %d frag# %d consumed "
1536 "status %d !", rx_seq, fragno, status);
1537 return;
1538 }
1539 }
1540
1541 if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
1542 &mic_failure_info.da_mac_addr.bytes[0])) {
1543 dp_err_rl("Failed to get da_mac_addr");
1544 goto fail;
1545 }
1546
1547 if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
1548 &mic_failure_info.ta_mac_addr.bytes[0])) {
1549 dp_err_rl("Failed to get ta_mac_addr");
1550 goto fail;
1551 }
1552
1553 mic_failure_info.key_id = 0;
1554 mic_failure_info.multicast =
1555 IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
1556 qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
1557 mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
1558 mic_failure_info.data = NULL;
1559 mic_failure_info.vdev_id = vdev->vdev_id;
1560
1561 tops = pdev->soc->cdp_soc.ol_ops;
1562 if (tops->rx_mic_error)
1563 tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
1564 &mic_failure_info);
1565
1566 fail:
1567 dp_rx_nbuf_free(nbuf);
1568 return;
1569 }
1570
1571 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
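/**
 * dp_rx_peek_trapped_packet() - request the control path to send a wakeup
 *				 trigger for the vdev on which a trapped
 *				 packet was received
 * @soc: core txrx main context
 * @vdev: vdev on which the packet was received
 *
 * Return: None
 */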
1572 static void dp_rx_peek_trapped_packet(struct dp_soc *soc,
1573 struct dp_vdev *vdev)
1574 {
1575 if (soc->cdp_soc.ol_ops->send_wakeup_trigger)
1576 soc->cdp_soc.ol_ops->send_wakeup_trigger(soc->ctrl_psoc,
1577 vdev->vdev_id);
1578 }
1579 #else
1580 static void dp_rx_peek_trapped_packet(struct dp_soc *soc,
1581 struct dp_vdev *vdev)
1582 {
1583 return;
1584 }
1585 #endif
1586
1587 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
1588 defined(WLAN_MCAST_MLO)
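/**
 * dp_rx_igmp_handler() - let the arch-specific MLO multicast handler consume
 *			  the frame if applicable
 * @soc: core txrx main context
 * @vdev: vdev on which the frame was received
 * @peer: txrx peer handle
 * @nbuf: packet buffer
 * @link_id: link id on which the frame was received
 *
 * Return: true if the frame was consumed by the mcast handler, false otherwise
 */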
1589 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1590 struct dp_vdev *vdev,
1591 struct dp_txrx_peer *peer,
1592 qdf_nbuf_t nbuf,
1593 uint8_t link_id)
1594 {
1595 if (soc->arch_ops.dp_rx_mcast_handler) {
1596 if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
1597 nbuf, link_id))
1598 return true;
1599 }
1600 return false;
1601 }
1602 #else
1603 static bool dp_rx_igmp_handler(struct dp_soc *soc,
1604 struct dp_vdev *vdev,
1605 struct dp_txrx_peer *peer,
1606 qdf_nbuf_t nbuf,
1607 uint8_t link_id)
1608 {
1609 return false;
1610 }
1611 #endif
1612
1613 /**
1614 * dp_rx_err_route_hdl() - Function to send EAPOL frames to the stack.
1615 * Free any other packet which comes in
1616 * this path.
1617 *
1618 * @soc: core DP main context
1619 * @nbuf: buffer pointer
1620 * @txrx_peer: txrx peer handle
1621 * @rx_tlv_hdr: start of rx tlv header
1622 * @err_src: rxdma/reo
1623 * @link_id: link id on which the packet is received
1624 *
1625 * This function indicates EAPOL frame received in wbm error ring to stack.
1626 * Any other frame should be dropped.
1627 *
1628 * Return: None
1629 */
1630 static void
1631 dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
1632 struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
1633 enum hal_rx_wbm_error_source err_src,
1634 uint8_t link_id)
1635 {
1636 uint32_t pkt_len;
1637 uint16_t msdu_len;
1638 struct dp_vdev *vdev;
1639 struct hal_rx_msdu_metadata msdu_metadata;
1640 bool is_eapol;
1641 uint16_t buf_size;
1642
1643 buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
1644
1645 qdf_nbuf_set_rx_chfrag_start(
1646 nbuf,
1647 hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
1648 rx_tlv_hdr));
1649 qdf_nbuf_set_rx_chfrag_end(nbuf,
1650 hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
1651 rx_tlv_hdr));
1652 qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
1653 rx_tlv_hdr));
1654 qdf_nbuf_set_da_valid(nbuf,
1655 hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
1656 rx_tlv_hdr));
1657 qdf_nbuf_set_sa_valid(nbuf,
1658 hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
1659 rx_tlv_hdr));
1660
1661 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
1662 msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
1663 pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
1664
1665 if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
1666 if (dp_rx_check_pkt_len(soc, pkt_len))
1667 goto drop_nbuf;
1668
1669 /* Set length in nbuf */
1670 qdf_nbuf_set_pktlen(nbuf, qdf_min(pkt_len, (uint32_t)buf_size));
1671 }
1672
1673 /*
1674 * Check if DMA completed -- msdu_done is the last bit
1675 * to be written
1676 */
1677 if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1678 dp_err_rl("MSDU DONE failure");
1679 hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
1680 QDF_TRACE_LEVEL_INFO);
1681 qdf_assert(0);
1682 }
1683
1684 if (!txrx_peer)
1685 goto drop_nbuf;
1686
1687 vdev = txrx_peer->vdev;
1688 if (!vdev) {
1689 dp_err_rl("Null vdev!");
1690 DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
1691 goto drop_nbuf;
1692 }
1693
1694 /*
1695 * Advance the packet start pointer by total size of
1696 * pre-header TLV's
1697 */
1698 if (qdf_nbuf_is_frag(nbuf))
1699 qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1700 else
1701 qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
1702 soc->rx_pkt_tlv_size));
1703
1704 if (hal_rx_msdu_cce_metadata_get(soc->hal_soc, rx_tlv_hdr) ==
1705 CDP_STANDBY_METADATA)
1706 dp_rx_peek_trapped_packet(soc, vdev);
1707
1708 QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id;
1709 if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
1710 return;
1711
1712 dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
1713
1714 /*
1715 * Indicate EAPOL frame to stack only when vap mac address
1716 * matches the destination address.
1717 */
1718 is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
1719 if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
1720 qdf_ether_header_t *eh =
1721 (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1722 if (dp_rx_err_match_dhost(eh, vdev)) {
1723 DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
1724 qdf_nbuf_len(nbuf));
1725
1726 /*
1727 * Update the protocol tag in SKB based on
1728 * CCE metadata.
1729 */
1730 dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
1731 EXCEPTION_DEST_RING_ID,
1732 true, true);
1733 /* Update the flow tag in SKB based on FSE metadata */
1734 dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
1735 true);
1736 DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
1737 qdf_nbuf_len(nbuf),
1738 vdev->pdev->enhanced_stats_en);
1739 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
1740 rx.rx_success, 1,
1741 qdf_nbuf_len(nbuf),
1742 link_id);
1743 qdf_nbuf_set_exc_frame(nbuf, 1);
1744 qdf_nbuf_set_next(nbuf, NULL);
1745
1746 dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
1747 NULL, is_eapol);
1748
1749 return;
1750 }
1751 }
1752
1753 drop_nbuf:
1754
1755 DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
1756 err_src == HAL_RX_WBM_ERR_SRC_REO);
1757 DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
1758 err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
1759
1760 dp_rx_nbuf_free(nbuf);
1761 }
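/*
 * Illustrative note (not compiled): dp_rx_err_route_hdl() is invoked from
 * the WBM error processing path below when the REO/RXDMA push reason is
 * ROUTE rather than ERROR, e.g.:
 *
 *	dp_rx_err_route_hdl(soc, nbuf, txrx_peer, rx_tlv_hdr,
 *			    HAL_RX_WBM_ERR_SRC_REO, link_id);
 *
 * Only EAPOL/WAPI frames whose destination matches the vdev MAC address are
 * delivered to the stack; frames not consumed earlier (e.g. by the IGMP
 * handler) are dropped and counted in the reo2rel/rxdma2rel route-drop
 * counters.
 */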
1762
1763 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1764
1765 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
1766 /**
1767 * dp_rx_link_cookie_check() - Validate link desc cookie
1768 * @ring_desc: ring descriptor
1769 *
1770 * Return: qdf status
1771 */
1772 static inline QDF_STATUS
1773 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1774 {
1775 if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
1776 return QDF_STATUS_E_FAILURE;
1777
1778 return QDF_STATUS_SUCCESS;
1779 }
1780
1781 /**
1782 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
1783 * @ring_desc: ring descriptor
1784 *
1785 * Return: None
1786 */
1787 static inline void
1788 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1789 {
1790 HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
1791 }
1792 #else
1793 static inline QDF_STATUS
1794 dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
1795 {
1796 return QDF_STATUS_SUCCESS;
1797 }
1798
1799 static inline void
1800 dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
1801 {
1802 }
1803 #endif
1804
1805 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1806 /**
1807 * dp_rx_err_ring_record_entry() - Record rx err ring history
1808 * @soc: Datapath soc structure
1809 * @paddr: paddr of the buffer in RX err ring
1810 * @sw_cookie: SW cookie of the buffer in RX err ring
1811 * @rbm: Return buffer manager of the buffer in RX err ring
1812 *
1813 * Return: None
1814 */
1815 static inline void
1816 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1817 uint32_t sw_cookie, uint8_t rbm)
1818 {
1819 struct dp_buf_info_record *record;
1820 uint32_t idx;
1821
1822 if (qdf_unlikely(!soc->rx_err_ring_history))
1823 return;
1824
1825 idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
1826 DP_RX_ERR_HIST_MAX);
1827
1828 /* No NULL check needed for record since it is an element of an array */
1829 record = &soc->rx_err_ring_history->entry[idx];
1830
1831 record->timestamp = qdf_get_log_timestamp();
1832 record->hbi.paddr = paddr;
1833 record->hbi.sw_cookie = sw_cookie;
1834 record->hbi.rbm = rbm;
1835 }
1836 #else
1837 static inline void
1838 dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
1839 uint32_t sw_cookie, uint8_t rbm)
1840 {
1841 }
1842 #endif
1843
1844 #if defined(HANDLE_RX_REROUTE_ERR) || defined(REO_EXCEPTION_MSDU_WAR)
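/**
 * dp_rx_err_handle_msdu_buf() - handle an MSDU buffer type entry received
 *                               on the REO error ring
 * @soc: core DP main context
 * @ring_desc: REO error ring descriptor
 *
 * Unmaps and frees the nbuf associated with the descriptor and returns the
 * SW rx descriptor to the pdev free list.
 *
 * Return: lmac id of the reaped buffer, DP_INVALID_LMAC_ID on failure
 */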
1845 static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
1846 hal_ring_desc_t ring_desc)
1847 {
1848 int lmac_id = DP_INVALID_LMAC_ID;
1849 struct dp_rx_desc *rx_desc;
1850 struct hal_buf_info hbi;
1851 struct dp_pdev *pdev;
1852 struct rx_desc_pool *rx_desc_pool;
1853
1854 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
1855
1856 rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);
1857
1858 /* sanity */
1859 if (!rx_desc) {
1860 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
1861 goto assert_return;
1862 }
1863
1864 if (!rx_desc->nbuf)
1865 goto assert_return;
1866
1867 dp_rx_err_ring_record_entry(soc, hbi.paddr,
1868 hbi.sw_cookie,
1869 hal_rx_ret_buf_manager_get(soc->hal_soc,
1870 ring_desc));
1871 if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
1872 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1873 rx_desc->in_err_state = 1;
1874 goto assert_return;
1875 }
1876
1877 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1878 /* After this point the rx_desc and nbuf are valid */
1879 dp_ipa_rx_buf_smmu_mapping_lock(soc);
1880 qdf_assert_always(!rx_desc->unmapped);
1881 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
1882 rx_desc->unmapped = 1;
1883 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1884 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
1885 rx_desc->pool_id);
1886
1887 pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
1888 lmac_id = rx_desc->pool_id;
1889 dp_rx_add_to_free_desc_list(&pdev->free_list_head,
1890 &pdev->free_list_tail,
1891 rx_desc);
1892 return lmac_id;
1893
1894 assert_return:
1895 qdf_assert(0);
1896 return lmac_id;
1897 }
1898 #endif
1899
1900 #ifdef HANDLE_RX_REROUTE_ERR
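/**
 * dp_rx_err_exception() - handle MSDU buffer type exceptions seen on the
 *                         REO error ring
 * @soc: core DP main context
 * @ring_desc: REO error ring descriptor
 *
 * Triggers self recovery if the overall count of such errors crosses
 * DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD, or if the per-window count exceeds
 * DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT within DP_RX_ERR_ROUTE_TIMEOUT_US,
 * and then reaps the MSDU buffer.
 *
 * Return: lmac id of the reaped buffer, DP_INVALID_LMAC_ID on failure
 */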
1901 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1902 {
1903 int ret;
1904 uint64_t cur_time_stamp;
1905
1906 DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);
1907
1908 /* Recover if overall error count exceeds threshold */
1909 if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
1910 DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
1911 dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1912 soc->stats.rx.err.reo_err_msdu_buf_rcved,
1913 soc->rx_route_err_start_pkt_ts);
1914 qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
1915 }
1916
1917 cur_time_stamp = qdf_get_log_timestamp_usecs();
1918 if (!soc->rx_route_err_start_pkt_ts)
1919 soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1920
1921 /* Recover if the threshold number of packets is received within the timeout window */
1922 if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
1923 DP_RX_ERR_ROUTE_TIMEOUT_US) {
1924 soc->rx_route_err_start_pkt_ts = cur_time_stamp;
1925
1926 if (soc->rx_route_err_in_window >
1927 DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
1928 qdf_trigger_self_recovery(NULL,
1929 QDF_RX_REG_PKT_ROUTE_ERR);
1930 dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
1931 soc->stats.rx.err.reo_err_msdu_buf_rcved,
1932 soc->rx_route_err_start_pkt_ts);
1933 } else {
1934 soc->rx_route_err_in_window = 1;
1935 }
1936 } else {
1937 soc->rx_route_err_in_window++;
1938 }
1939
1940 ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);
1941
1942 return ret;
1943 }
1944 #else /* HANDLE_RX_REROUTE_ERR */
1945 #ifdef REO_EXCEPTION_MSDU_WAR
1946 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1947 {
1948 return dp_rx_err_handle_msdu_buf(soc, ring_desc);
1949 }
1950 #else /* REO_EXCEPTION_MSDU_WAR */
1951 static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
1952 {
1953 qdf_assert_always(0);
1954
1955 return DP_INVALID_LMAC_ID;
1956 }
1957 #endif /* REO_EXCEPTION_MSDU_WAR */
1958 #endif /* HANDLE_RX_REROUTE_ERR */
1959
1960 #ifdef WLAN_MLO_MULTI_CHIP
1961 /**
1962 * dp_idle_link_bm_id_check() - WAR for HW issue
1963 *
1964 * @soc: DP SOC handle
1965 * @rbm: idle link RBM value
1966 * @ring_desc: reo error link descriptor
1967 *
1968 * This is a WAR for a HW issue where a link descriptor
1969 * belonging to a partner soc is received because packets are
1970 * wrongly interpreted as fragments.
1971 *
1972 * Return: true in case the link desc is consumed,
1973 * false in other cases
1974 */
1975 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
1976 void *ring_desc)
1977 {
1978 struct dp_soc *replenish_soc = NULL;
1979
1980 /* link desc belongs to the same soc: nothing to consume here */
1981 if (rbm == soc->idle_link_bm_id)
1982 return false;
1983
1984 if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
1985 replenish_soc =
1986 soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);
1987
1988 qdf_assert_always(replenish_soc);
1989
1990 /*
1991 * For the WIN use case only fragment packets are expected in
1992 * this ring, and since fragmentation is not supported for MLO,
1993 * links from another soc should not be seen here.
1994 *
1995 * Drop all packets from the partner soc and replenish the descriptors.
1996 */
1997 dp_handle_wbm_internal_error(replenish_soc, ring_desc,
1998 HAL_WBM_RELEASE_RING_2_DESC_TYPE);
1999
2000 return true;
2001 }
2002 #else
2003 static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
2004 void *ring_desc)
2005 {
2006 return false;
2007 }
2008 #endif
2009
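/**
 * dp_rx_err_dup_frame() - account a duplicate (non-BA) frame against the
 *                         peer's vdev
 * @soc: core DP main context
 * @mpdu_desc_info: MPDU descriptor info carrying the peer metadata
 *
 * Return: None
 */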
2010 static inline void
2011 dp_rx_err_dup_frame(struct dp_soc *soc,
2012 struct hal_rx_mpdu_desc_info *mpdu_desc_info)
2013 {
2014 struct dp_txrx_peer *txrx_peer = NULL;
2015 dp_txrx_ref_handle txrx_ref_handle = NULL;
2016 uint16_t peer_id;
2017
2018 peer_id =
2019 dp_rx_peer_metadata_peer_id_get(soc,
2020 mpdu_desc_info->peer_meta_data);
2021 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2022 &txrx_ref_handle,
2023 DP_MOD_ID_RX_ERR);
2024 if (txrx_peer) {
2025 DP_STATS_INC(txrx_peer->vdev, rx.duplicate_count, 1);
2026 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
2027 }
2028 }
2029
2030 uint32_t
2031 dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2032 hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2033 {
2034 hal_ring_desc_t ring_desc;
2035 hal_soc_handle_t hal_soc;
2036 uint32_t count = 0;
2037 uint32_t rx_bufs_used = 0;
2038 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
2039 uint8_t mac_id = 0;
2040 uint8_t buf_type;
2041 uint8_t err_status;
2042 struct hal_rx_mpdu_desc_info mpdu_desc_info;
2043 struct hal_buf_info hbi;
2044 struct dp_pdev *dp_pdev;
2045 struct dp_srng *dp_rxdma_srng;
2046 struct rx_desc_pool *rx_desc_pool;
2047 void *link_desc_va;
2048 struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
2049 uint16_t num_msdus;
2050 struct dp_rx_desc *rx_desc = NULL;
2051 QDF_STATUS status;
2052 bool ret;
2053 uint32_t error_code = 0;
2054 bool sw_pn_check_needed;
2055 int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
2056 int i, rx_bufs_reaped_total;
2057 uint16_t peer_id;
2058 struct dp_txrx_peer *txrx_peer = NULL;
2059 dp_txrx_ref_handle txrx_ref_handle = NULL;
2060 uint32_t num_pending, num_entries;
2061 bool near_full;
2062
2063 /* Debug -- Remove later */
2064 qdf_assert(soc && hal_ring_hdl);
2065
2066 hal_soc = soc->hal_soc;
2067
2068 /* Debug -- Remove later */
2069 qdf_assert(hal_soc);
2070 num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
2071
2072 more_data:
2073 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2074
2075 /* TODO */
2076 /*
2077 * Need API to convert from hal_ring pointer to
2078 * Ring Type / Ring Id combo
2079 */
2080 DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2081 dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
2082 hal_ring_hdl);
2083 goto done;
2084 }
2085
2086 while (qdf_likely(quota-- && (ring_desc =
2087 hal_srng_dst_peek(hal_soc,
2088 hal_ring_hdl)))) {
2089
2090 DP_STATS_INC(soc, rx.err_ring_pkts, 1);
2091 err_status = hal_rx_err_status_get(hal_soc, ring_desc);
2092 buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);
2093
2094 if (err_status == HAL_REO_ERROR_DETECTED)
2095 error_code = hal_rx_get_reo_error_code(hal_soc,
2096 ring_desc);
2097
2098 qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
2099 sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
2100 err_status,
2101 error_code);
2102 if (!sw_pn_check_needed) {
2103 /*
2104 * MPDU desc info will be present in the REO desc
2105 * only in the below scenarios
2106 * 1) pn_in_dest_disabled: always
2107 * 2) pn_in_dest enabled: All cases except 2k-jump
2108 * and OOR errors
2109 */
2110 hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
2111 &mpdu_desc_info);
2112 }
2113
2114 if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
2115 goto next_entry;
2116
2117 /*
2118 * For REO error ring, only MSDU LINK DESC is expected.
2119 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
2120 */
2121 if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
2122 int lmac_id;
2123
2124 lmac_id = dp_rx_err_exception(soc, ring_desc);
2125 if (lmac_id >= 0)
2126 rx_bufs_reaped[lmac_id] += 1;
2127 goto next_entry;
2128 }
2129
2130 hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
2131 &hbi);
2132 /*
2133 * check for the magic number in the sw cookie
2134 */
2135 qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
2136 soc->link_desc_id_start);
2137
2138 if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
2139 DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2140 goto next_entry;
2141 }
2142
2143 status = dp_rx_link_cookie_check(ring_desc);
2144 if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2145 DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
2146 break;
2147 }
2148
2149 hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2150 link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
2151 hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
2152 &num_msdus);
2153 if (!num_msdus ||
2154 !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
2155 dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
2156 num_msdus, msdu_list.sw_cookie[0]);
2157 dp_rx_link_desc_return(soc, ring_desc,
2158 HAL_BM_ACTION_PUT_IN_IDLE_LIST);
2159 goto next_entry;
2160 }
2161
2162 dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
2163 msdu_list.sw_cookie[0],
2164 msdu_list.rbm[0]);
2165 // TODO - BE- Check if the RBM is to be checked for all chips
2166 if (qdf_unlikely((msdu_list.rbm[0] !=
2167 dp_rx_get_rx_bm_id(soc)) &&
2168 (msdu_list.rbm[0] !=
2169 soc->idle_link_bm_id) &&
2170 (msdu_list.rbm[0] !=
2171 dp_rx_get_defrag_bm_id(soc)))) {
2172 /* TODO */
2173 /* Call appropriate handler */
2174 if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2175 DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
2176 dp_rx_err_err("%pK: Invalid RBM %d",
2177 soc, msdu_list.rbm[0]);
2178 }
2179
2180 /* Return link descriptor through WBM ring (SW2WBM)*/
2181 dp_rx_link_desc_return(soc, ring_desc,
2182 HAL_BM_ACTION_RELEASE_MSDU_LIST);
2183 goto next_entry;
2184 }
2185
2186 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
2187 soc,
2188 msdu_list.sw_cookie[0]);
2189 qdf_assert_always(rx_desc);
2190
2191 mac_id = rx_desc->pool_id;
2192
2193 if (sw_pn_check_needed) {
2194 goto process_reo_error_code;
2195 }
2196
2197 if (mpdu_desc_info.bar_frame) {
2198 qdf_assert_always(mpdu_desc_info.msdu_count == 1);
2199
2200 dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
2201 &mpdu_desc_info, err_status,
2202 error_code);
2203
2204 rx_bufs_reaped[mac_id] += 1;
2205 goto next_entry;
2206 }
2207
2208 if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
2209 /*
2210 * Only one msdu per link desc is handled for the
2211 * fragmented case. Drop the msdus and release the link
2212 * desc back if there is more than one msdu in the link desc.
2213 */
2214 if (qdf_unlikely(num_msdus > 1)) {
2215 count = dp_rx_msdus_drop(soc, ring_desc,
2216 &mpdu_desc_info,
2217 &mac_id, quota);
2218 rx_bufs_reaped[mac_id] += count;
2219 goto next_entry;
2220 }
2221
2222 /*
2223 * This is an unlikely scenario where the host is reaping
2224 * a descriptor which it already reaped just a while ago
2225 * but is yet to replenish back to HW.
2226 * In this case the host will dump the last 128 descriptors,
2227 * including the software descriptor rx_desc, and assert.
2228 */
2229
2230 if (qdf_unlikely(!rx_desc->in_use)) {
2231 DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2232 dp_info_rl("Reaping rx_desc not in use!");
2233 dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2234 ring_desc, rx_desc);
2235 /* ignore duplicate RX desc and continue */
2236 /* Pop out the descriptor */
2237 goto next_entry;
2238 }
2239
2240 ret = dp_rx_desc_paddr_sanity_check(rx_desc,
2241 msdu_list.paddr[0]);
2242 if (!ret) {
2243 DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2244 rx_desc->in_err_state = 1;
2245 goto next_entry;
2246 }
2247
2248 count = dp_rx_frag_handle(soc,
2249 ring_desc, &mpdu_desc_info,
2250 rx_desc, &mac_id, quota);
2251
2252 rx_bufs_reaped[mac_id] += count;
2253 DP_STATS_INC(soc, rx.rx_frags, 1);
2254
2255 peer_id = dp_rx_peer_metadata_peer_id_get(soc,
2256 mpdu_desc_info.peer_meta_data);
2257 txrx_peer =
2258 dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2259 &txrx_ref_handle,
2260 DP_MOD_ID_RX_ERR);
2261 if (txrx_peer) {
2262 DP_STATS_INC(txrx_peer->vdev,
2263 rx.fragment_count, 1);
2264 dp_txrx_peer_unref_delete(txrx_ref_handle,
2265 DP_MOD_ID_RX_ERR);
2266 }
2267 goto next_entry;
2268 }
2269
2270 process_reo_error_code:
2271 /*
2272 * Expect REO errors to be handled after this point
2273 */
2274 qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
2275
2276 dp_info_rl("Got pkt with REO ERROR: %d", error_code);
2277
2278 switch (error_code) {
2279 case HAL_REO_ERR_PN_CHECK_FAILED:
2280 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2281 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2282 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2283 if (dp_pdev)
2284 DP_STATS_INC(dp_pdev, err.reo_error, 1);
2285 count = dp_rx_pn_error_handle(soc,
2286 ring_desc,
2287 &mpdu_desc_info, &mac_id,
2288 quota);
2289
2290 rx_bufs_reaped[mac_id] += count;
2291 break;
2292 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2293 case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
2294 case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2295 case HAL_REO_ERR_REGULAR_FRAME_OOR:
2296 case HAL_REO_ERR_BAR_FRAME_OOR:
2297 case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2298 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2299 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2300 if (dp_pdev)
2301 DP_STATS_INC(dp_pdev, err.reo_error, 1);
2302 count = dp_rx_reo_err_entry_process(
2303 soc,
2304 ring_desc,
2305 &mpdu_desc_info,
2306 link_desc_va,
2307 error_code);
2308
2309 rx_bufs_reaped[mac_id] += count;
2310 break;
2311 case HAL_REO_ERR_NON_BA_DUPLICATE:
2312 dp_rx_err_dup_frame(soc, &mpdu_desc_info);
2313 fallthrough;
2314 case HAL_REO_ERR_QUEUE_DESC_INVALID:
2315 case HAL_REO_ERR_AMPDU_IN_NON_BA:
2316 case HAL_REO_ERR_BA_DUPLICATE:
2317 case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
2318 case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
2319 case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
2320 DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
2321 count = dp_rx_msdus_drop(soc, ring_desc,
2322 &mpdu_desc_info,
2323 &mac_id, quota);
2324 rx_bufs_reaped[mac_id] += count;
2325 break;
2326 default:
2327 /* Assert if unexpected error type */
2328 qdf_assert_always(0);
2329 }
2330 next_entry:
2331 dp_rx_link_cookie_invalidate(ring_desc);
2332 hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2333
2334 rx_bufs_reaped_total = 0;
2335 for (i = 0; i < MAX_PDEV_CNT; i++)
2336 rx_bufs_reaped_total += rx_bufs_reaped[i];
2337
2338 if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
2339 max_reap_limit))
2340 break;
2341 }
2342
2343 done:
2344 dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2345
2346 if (soc->rx.flags.defrag_timeout_check) {
2347 uint32_t now_ms =
2348 qdf_system_ticks_to_msecs(qdf_system_ticks());
2349
2350 if (now_ms >= soc->rx.defrag.next_flush_ms)
2351 dp_rx_defrag_waitlist_flush(soc);
2352 }
2353
2354 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2355 if (rx_bufs_reaped[mac_id]) {
2356 dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2357 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2358 rx_desc_pool = &soc->rx_desc_buf[mac_id];
2359
2360 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2361 rx_desc_pool,
2362 rx_bufs_reaped[mac_id],
2363 &dp_pdev->free_list_head,
2364 &dp_pdev->free_list_tail,
2365 false);
2366 rx_bufs_used += rx_bufs_reaped[mac_id];
2367 }
2368 rx_bufs_reaped[mac_id] = 0;
2369 }
2370
2371 if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2372 if (quota) {
2373 num_pending =
2374 dp_rx_srng_get_num_pending(hal_soc,
2375 hal_ring_hdl,
2376 num_entries,
2377 &near_full);
2378
2379 if (num_pending) {
2380 DP_STATS_INC(soc, rx.err.hp_oos2, 1);
2381
2382 if (!hif_exec_should_yield(soc->hif_handle,
2383 int_ctx->dp_intr_id))
2384 goto more_data;
2385
2386 if (qdf_unlikely(near_full)) {
2387 DP_STATS_INC(soc, rx.err.near_full, 1);
2388 goto more_data;
2389 }
2390 }
2391 }
2392 }
2393
2394 return rx_bufs_used; /* Assume no scale factor for now */
2395 }
2396
2397 #ifdef DROP_RXDMA_DECRYPT_ERR
2398 /**
2399 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
2400 *
2401 * Return: true if rxdma decrypt err frames should be processed further, false if they are to be dropped
2402 */
2403 static inline bool dp_handle_rxdma_decrypt_err(void)
2404 {
2405 return false;
2406 }
2407 #else
2408 static inline bool dp_handle_rxdma_decrypt_err(void)
2409 {
2410 return true;
2411 }
2412 #endif
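/*
 * Illustrative note (not compiled): the RXDMA decrypt-error handling below
 * keys off dp_handle_rxdma_decrypt_err(); when it returns false the frame
 * is simply freed, otherwise it is passed on to dp_rx_process_rxdma_err(),
 * e.g.:
 *
 *	if (!dp_handle_rxdma_decrypt_err()) {
 *		dp_rx_nbuf_free(nbuf);
 *		break;
 *	}
 */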
2413
2414 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
2415 {
2416 if (soc->wbm_sg_last_msdu_war) {
2417 uint32_t len;
2418 qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;
2419
2420 len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
2421 qdf_nbuf_data(temp));
2422 temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
2423 while (temp) {
2424 QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
2425 temp = temp->next;
2426 }
2427 }
2428 }
2429
2430 #ifdef RX_DESC_DEBUG_CHECK
2431 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2432 hal_ring_handle_t hal_ring_hdl,
2433 hal_ring_desc_t ring_desc,
2434 struct dp_rx_desc *rx_desc)
2435 {
2436 struct hal_buf_info hbi;
2437
2438 hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2439 /* Sanity check for possible buffer paddr corruption */
2440 if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
2441 return QDF_STATUS_SUCCESS;
2442
2443 hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);
2444
2445 return QDF_STATUS_E_FAILURE;
2446 }
2447
2448 #else
2449 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2450 hal_ring_handle_t hal_ring_hdl,
2451 hal_ring_desc_t ring_desc,
2452 struct dp_rx_desc *rx_desc)
2453 {
2454 return QDF_STATUS_SUCCESS;
2455 }
2456 #endif
2457 bool
2458 dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
2459 {
2460 /*
2461 * Currently only the Null Queue and Unencrypted error handlers support
2462 * SG. The other error handlers do not deal with SG buffers.
2463 */
2464 if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
2465 (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
2466 ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
2467 (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
2468 return true;
2469
2470 return false;
2471 }
2472
2473 #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2474 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2475 qdf_nbuf_t nbuf)
2476 {
2477 /*
2478 * In case of fast recycle the TX driver can skip invalidating
2479 * the buffer for SFE forwarding, so the TLV headers must be
2480 * invalidated here after writing to this location.
2481 */
2482 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2483 (void *)(nbuf->data +
2484 soc->rx_pkt_tlv_size +
2485 L3_HEADER_PAD));
2486 }
2487 #else
2488 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2489 qdf_nbuf_t nbuf)
2490 {
2491 }
2492 #endif
2493
2494 #ifndef CONFIG_NBUF_AP_PLATFORM
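/**
 * dp_rx_get_peer_id() - get the peer id for an error frame
 * @soc: core DP main context
 * @rx_tlv_hdr: start address of the rx TLVs
 * @nbuf: rx frame
 *
 * Return: peer id extracted from the peer metadata
 */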
2495 static inline uint16_t
2496 dp_rx_get_peer_id(struct dp_soc *soc,
2497 uint8_t *rx_tlv_hdr,
2498 qdf_nbuf_t nbuf)
2499 {
2500 uint32_t peer_mdata = 0;
2501
2502 peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2503 rx_tlv_hdr);
2504 return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2505 }
2506
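/**
 * dp_rx_get_wbm_err_info_from_nbuf() - retrieve the WBM error info saved
 *                                      for this frame
 * @soc: core DP main context
 * @nbuf: rx frame
 * @rx_tlv_hdr: start address of the rx TLVs
 * @wbm_err: filled with the retrieved WBM error info
 *
 * Return: None
 */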
2507 static inline void
2508 dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2509 qdf_nbuf_t nbuf,
2510 uint8_t *rx_tlv_hdr,
2511 union hal_wbm_err_info_u *wbm_err)
2512 {
2513 hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
2514 (uint8_t *)&wbm_err->info,
2515 sizeof(union hal_wbm_err_info_u));
2516 }
2517
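/**
 * dp_rx_set_wbm_err_info_in_nbuf() - save the WBM error info for this frame
 *                                    so the error path can retrieve it later
 * @soc: core DP main context
 * @nbuf: rx frame
 * @wbm_err: WBM error info to be saved
 *
 * Return: None
 */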
2518 void
2519 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2520 qdf_nbuf_t nbuf,
2521 union hal_wbm_err_info_u wbm_err)
2522 {
2523 hal_rx_priv_info_set_in_tlv(soc->hal_soc,
2524 qdf_nbuf_data(nbuf),
2525 (uint8_t *)&wbm_err.info,
2526 sizeof(union hal_wbm_err_info_u));
2527 }
2528 #else
2529 static inline uint16_t
2530 dp_rx_get_peer_id(struct dp_soc *soc,
2531 uint8_t *rx_tlv_hdr,
2532 qdf_nbuf_t nbuf)
2533 {
2534 uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
2535
2536 return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
2537 }
2538
2539 static inline void
2540 dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
2541 qdf_nbuf_t nbuf,
2542 uint8_t *rx_tlv_hdr,
2543 union hal_wbm_err_info_u *wbm_err)
2544 {
2545 wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
2546 }
2547
2548 void
2549 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
2550 qdf_nbuf_t nbuf,
2551 union hal_wbm_err_info_u wbm_err)
2552 {
2553 QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
2554 }
2555 #endif /* CONFIG_NBUF_AP_PLATFORM */
2556
2557 uint32_t
2558 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
2559 hal_ring_handle_t hal_ring_hdl, uint32_t quota)
2560 {
2561 hal_soc_handle_t hal_soc;
2562 uint32_t rx_bufs_used = 0;
2563 struct dp_pdev *dp_pdev;
2564 uint8_t *rx_tlv_hdr;
2565 bool is_tkip_mic_err;
2566 qdf_nbuf_t nbuf_head = NULL;
2567 qdf_nbuf_t nbuf, next;
2568 union hal_wbm_err_info_u wbm_err = { 0 };
2569 uint8_t pool_id;
2570 uint8_t tid = 0;
2571 uint8_t link_id = 0;
2572
2573 /* Debug -- Remove later */
2574 qdf_assert(soc && hal_ring_hdl);
2575
2576 hal_soc = soc->hal_soc;
2577
2578 /* Debug -- Remove later */
2579 qdf_assert(hal_soc);
2580
2581 nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
2582 hal_ring_hdl,
2583 quota,
2584 &rx_bufs_used);
2585 nbuf = nbuf_head;
2586 while (nbuf) {
2587 struct dp_txrx_peer *txrx_peer;
2588 struct dp_peer *peer;
2589 uint16_t peer_id;
2590 uint8_t err_code;
2591 uint8_t *tlv_hdr;
2592 dp_txrx_ref_handle txrx_ref_handle = NULL;
2593 rx_tlv_hdr = qdf_nbuf_data(nbuf);
2594
2595 /*
2596 * retrieve the wbm desc info from nbuf CB/TLV, so we can
2597 * handle error cases appropriately
2598 */
2599 dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
2600 rx_tlv_hdr,
2601 &wbm_err);
2602
2603 peer_id = dp_rx_get_peer_id(soc,
2604 rx_tlv_hdr,
2605 nbuf);
2606 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
2607 &txrx_ref_handle,
2608 DP_MOD_ID_RX_ERR);
2609
2610 if (!txrx_peer)
2611 dp_info_rl("peer is null peer_id %u err_src %u, "
2612 "REO: push_rsn %u err_code %u, "
2613 "RXDMA: push_rsn %u err_code %u",
2614 peer_id, wbm_err.info_bit.wbm_err_src,
2615 wbm_err.info_bit.reo_psh_rsn,
2616 wbm_err.info_bit.reo_err_code,
2617 wbm_err.info_bit.rxdma_psh_rsn,
2618 wbm_err.info_bit.rxdma_err_code);
2619
2620 /* Set queue_mapping in nbuf to 0 */
2621 dp_set_rx_queue(nbuf, 0);
2622
2623 next = nbuf->next;
2624 /*
2625 * Form the SG list for msdu continuation buffers;
2626 * QCN9000 has support for this.
2627 */
2628 if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2629 nbuf = dp_rx_sg_create(soc, nbuf);
2630 next = nbuf->next;
2631 /*
2632 * SG error handling is not done correctly,
2633 * drop SG frames for now.
2634 */
2635 dp_rx_nbuf_free(nbuf);
2636 dp_info_rl("scattered msdu dropped");
2637 nbuf = next;
2638 if (txrx_peer)
2639 dp_txrx_peer_unref_delete(txrx_ref_handle,
2640 DP_MOD_ID_RX_ERR);
2641 continue;
2642 }
2643
2644 dp_rx_nbuf_set_link_id_from_tlv(soc, rx_tlv_hdr, nbuf);
2645
2646 pool_id = wbm_err.info_bit.pool_id;
2647 dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2648
2649 if (dp_pdev && dp_pdev->link_peer_stats &&
2650 txrx_peer && txrx_peer->is_mld_peer) {
2651 link_id = dp_rx_get_stats_arr_idx_from_link_id(
2652 nbuf,
2653 txrx_peer);
2654 } else {
2655 link_id = 0;
2656 }
2657
2658 if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
2659 if (wbm_err.info_bit.reo_psh_rsn
2660 == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
2661
2662 DP_STATS_INC(soc,
2663 rx.err.reo_error
2664 [wbm_err.info_bit.reo_err_code], 1);
2665 /* increment @pdev level */
2666 if (dp_pdev)
2667 DP_STATS_INC(dp_pdev, err.reo_error,
2668 1);
2669
2670 switch (wbm_err.info_bit.reo_err_code) {
2671 /*
2672 * Handling for packets which have NULL REO
2673 * queue descriptor
2674 */
2675 case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
2676 pool_id = wbm_err.info_bit.pool_id;
2677 soc->arch_ops.dp_rx_null_q_desc_handle(
2678 soc, nbuf,
2679 rx_tlv_hdr,
2680 pool_id,
2681 txrx_peer,
2682 FALSE,
2683 link_id);
2684 break;
2685 /* TODO */
2686 /* Add per error code accounting */
2687 case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
2688 if (txrx_peer)
2689 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2690 rx.err.jump_2k_err,
2691 1,
2692 link_id);
2693
2694 pool_id = wbm_err.info_bit.pool_id;
2695
2696 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2697 rx_tlv_hdr)) {
2698 tid =
2699 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2700 }
2701 QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2702 hal_rx_msdu_start_msdu_len_get(
2703 soc->hal_soc, rx_tlv_hdr);
2704 nbuf->next = NULL;
2705 dp_2k_jump_handle(soc, nbuf,
2706 rx_tlv_hdr,
2707 peer_id, tid);
2708 break;
2709 case HAL_REO_ERR_REGULAR_FRAME_OOR:
2710 if (txrx_peer)
2711 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2712 rx.err.oor_err,
2713 1,
2714 link_id);
2715 if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2716 rx_tlv_hdr)) {
2717 tid =
2718 hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
2719 }
2720 QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2721 hal_rx_msdu_start_msdu_len_get(
2722 soc->hal_soc, rx_tlv_hdr);
2723 nbuf->next = NULL;
2724 dp_rx_oor_handle(soc, nbuf,
2725 peer_id,
2726 rx_tlv_hdr);
2727 break;
2728 case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
2729 case HAL_REO_ERR_BAR_FRAME_OOR:
2730 peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
2731 if (peer) {
2732 dp_rx_err_handle_bar(soc, peer,
2733 nbuf);
2734 dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2735 }
2736 dp_rx_nbuf_free(nbuf);
2737 break;
2738
2739 case HAL_REO_ERR_PN_CHECK_FAILED:
2740 case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
2741 if (txrx_peer)
2742 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2743 rx.err.pn_err,
2744 1,
2745 link_id);
2746 dp_rx_nbuf_free(nbuf);
2747 break;
2748
2749 default:
2750 dp_info_rl("Got pkt with REO ERROR: %d",
2751 wbm_err.info_bit.
2752 reo_err_code);
2753 dp_rx_nbuf_free(nbuf);
2754 }
2755 } else if (wbm_err.info_bit.reo_psh_rsn
2756 == HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
2757 dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2758 rx_tlv_hdr,
2759 HAL_RX_WBM_ERR_SRC_REO,
2760 link_id);
2761 } else {
2762 /* should not enter here */
2763 dp_rx_err_alert("invalid reo push reason %u",
2764 wbm_err.info_bit.reo_psh_rsn);
2765 dp_rx_nbuf_free(nbuf);
2766 dp_assert_always_internal(0);
2767 }
2768 } else if (wbm_err.info_bit.wbm_err_src ==
2769 HAL_RX_WBM_ERR_SRC_RXDMA) {
2770 if (wbm_err.info_bit.rxdma_psh_rsn
2771 == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2772 DP_STATS_INC(soc,
2773 rx.err.rxdma_error
2774 [wbm_err.info_bit.rxdma_err_code], 1);
2775 /* increment @pdev level */
2776 if (dp_pdev)
2777 DP_STATS_INC(dp_pdev,
2778 err.rxdma_error, 1);
2779
2780 switch (wbm_err.info_bit.rxdma_err_code) {
2781 case HAL_RXDMA_ERR_UNENCRYPTED:
2782
2783 case HAL_RXDMA_ERR_WIFI_PARSE:
2784 if (txrx_peer)
2785 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2786 rx.err.rxdma_wifi_parse_err,
2787 1,
2788 link_id);
2789
2790 pool_id = wbm_err.info_bit.pool_id;
2791 dp_rx_process_rxdma_err(soc, nbuf,
2792 rx_tlv_hdr,
2793 txrx_peer,
2794 wbm_err.
2795 info_bit.
2796 rxdma_err_code,
2797 pool_id,
2798 link_id);
2799 break;
2800
2801 case HAL_RXDMA_ERR_TKIP_MIC:
2802 dp_rx_process_mic_error(soc, nbuf,
2803 rx_tlv_hdr,
2804 txrx_peer);
2805 if (txrx_peer)
2806 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2807 rx.err.mic_err,
2808 1,
2809 link_id);
2810 break;
2811
2812 case HAL_RXDMA_ERR_DECRYPT:
2813 /* All the TKIP-MIC failures are treated as Decrypt Errors
2814 * for QCN9224 Targets
2815 */
2816 is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);
2817
2818 if (is_tkip_mic_err && txrx_peer) {
2819 dp_rx_process_mic_error(soc, nbuf,
2820 rx_tlv_hdr,
2821 txrx_peer);
2822 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2823 rx.err.mic_err,
2824 1,
2825 link_id);
2826 break;
2827 }
2828
2829 if (txrx_peer) {
2830 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
2831 rx.err.decrypt_err,
2832 1,
2833 link_id);
2834 dp_rx_nbuf_free(nbuf);
2835 break;
2836 }
2837
2838 if (!dp_handle_rxdma_decrypt_err()) {
2839 dp_rx_nbuf_free(nbuf);
2840 break;
2841 }
2842
2843 pool_id = wbm_err.info_bit.pool_id;
2844 err_code = wbm_err.info_bit.rxdma_err_code;
2845 tlv_hdr = rx_tlv_hdr;
2846 dp_rx_process_rxdma_err(soc, nbuf,
2847 tlv_hdr, NULL,
2848 err_code,
2849 pool_id,
2850 link_id);
2851 break;
2852 case HAL_RXDMA_MULTICAST_ECHO:
2853 if (txrx_peer)
2854 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
2855 rx.mec_drop, 1,
2856 qdf_nbuf_len(nbuf),
2857 link_id);
2858 dp_rx_nbuf_free(nbuf);
2859 break;
2860 case HAL_RXDMA_UNAUTHORIZED_WDS:
2861 pool_id = wbm_err.info_bit.pool_id;
2862 err_code = wbm_err.info_bit.rxdma_err_code;
2863 tlv_hdr = rx_tlv_hdr;
2864 dp_rx_process_rxdma_err(soc, nbuf,
2865 tlv_hdr,
2866 txrx_peer,
2867 err_code,
2868 pool_id,
2869 link_id);
2870 break;
2871 default:
2872 dp_rx_nbuf_free(nbuf);
2873 dp_err_rl("RXDMA error %d",
2874 wbm_err.info_bit.rxdma_err_code);
2875 }
2876 } else if (wbm_err.info_bit.rxdma_psh_rsn
2877 == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
2878 dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
2879 rx_tlv_hdr,
2880 HAL_RX_WBM_ERR_SRC_RXDMA,
2881 link_id);
2882 } else if (wbm_err.info_bit.rxdma_psh_rsn
2883 == HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
2884 dp_rx_err_err("rxdma push reason %u",
2885 wbm_err.info_bit.rxdma_psh_rsn);
2886 DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
2887 dp_rx_nbuf_free(nbuf);
2888 } else {
2889 /* should not enter here */
2890 dp_rx_err_alert("invalid rxdma push reason %u",
2891 wbm_err.info_bit.rxdma_psh_rsn);
2892 dp_rx_nbuf_free(nbuf);
2893 dp_assert_always_internal(0);
2894 }
2895 } else {
2896 /* Should not come here */
2897 qdf_assert(0);
2898 }
2899
2900 if (txrx_peer)
2901 dp_txrx_peer_unref_delete(txrx_ref_handle,
2902 DP_MOD_ID_RX_ERR);
2903
2904 nbuf = next;
2905 }
2906 return rx_bufs_used; /* Assume no scale factor for now */
2907 }
2908
2909 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2910
2911 /**
2912 * dup_desc_dbg() - dump and assert if duplicate rx desc found
2913 *
2914 * @soc: core DP main context
2915 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2916 * @rx_desc: void pointer to rx descriptor
2917 *
2918 * Return: void
2919 */
2920 static void dup_desc_dbg(struct dp_soc *soc,
2921 hal_rxdma_desc_t rxdma_dst_ring_desc,
2922 void *rx_desc)
2923 {
2924 DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
2925 dp_rx_dump_info_and_assert(
2926 soc,
2927 soc->rx_rel_ring.hal_srng,
2928 hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
2929 rx_desc);
2930 }
2931
2932 /**
2933 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
2934 *
2935 * @soc: core DP main context
2936 * @mac_id: mac id which is one of 3 mac_ids
2937 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
2938 * @head: head of descs list to be freed
2939 * @tail: tail of descs list to be freed
2940 *
2941 * Return: number of msdu in MPDU to be popped
2942 */
2943 static inline uint32_t
2944 dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
2945 hal_rxdma_desc_t rxdma_dst_ring_desc,
2946 union dp_rx_desc_list_elem_t **head,
2947 union dp_rx_desc_list_elem_t **tail)
2948 {
2949 void *rx_msdu_link_desc;
2950 qdf_nbuf_t msdu;
2951 qdf_nbuf_t last;
2952 struct hal_rx_msdu_list msdu_list;
2953 uint16_t num_msdus;
2954 struct hal_buf_info buf_info;
2955 uint32_t rx_bufs_used = 0;
2956 uint32_t msdu_cnt;
2957 uint32_t i;
2958 uint8_t push_reason;
2959 uint8_t rxdma_error_code = 0;
2960 uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
2961 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2962 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
2963 hal_rxdma_desc_t ring_desc;
2964 struct rx_desc_pool *rx_desc_pool;
2965
2966 if (!pdev) {
2967 dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
2968 soc, mac_id);
2969 return rx_bufs_used;
2970 }
2971
2972 msdu = 0;
2973
2974 last = NULL;
2975
2976 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
2977 &buf_info, &msdu_cnt);
2978
2979 push_reason =
2980 hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
2981 if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
2982 rxdma_error_code =
2983 hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
2984 }
2985
2986 do {
2987 rx_msdu_link_desc =
2988 dp_rx_cookie_2_link_desc_va(soc, &buf_info);
2989
2990 qdf_assert_always(rx_msdu_link_desc);
2991
2992 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
2993 &msdu_list, &num_msdus);
2994
2995 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
2996 /* If the msdus belong to an NSS offloaded radio and
2997 * the rbm is not SW1_BM, return the msdu_link
2998 * descriptor without freeing the msdus (nbufs); let
2999 * these buffers be given to the NSS completion ring
3000 * for NSS to free them.
3001 * Otherwise iterate through the msdu link desc list and
3002 * free each msdu in the list.
3003 */
3004 if (msdu_list.rbm[0] !=
3005 HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
3006 wlan_cfg_get_dp_pdev_nss_enabled(
3007 pdev->wlan_cfg_ctx))
3008 bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
3009 else {
3010 for (i = 0; i < num_msdus; i++) {
3011 struct dp_rx_desc *rx_desc =
3012 soc->arch_ops.
3013 dp_rx_desc_cookie_2_va(
3014 soc,
3015 msdu_list.sw_cookie[i]);
3016 qdf_assert_always(rx_desc);
3017 msdu = rx_desc->nbuf;
3018 /*
3019 * This is an unlikely scenario
3020 * where the host is reaping
3021 * a descriptor which
3022 * it already reaped just a while ago
3023 * but is yet to replenish
3024 * it back to HW.
3025 * In this case the host will dump
3026 * the last 128 descriptors
3027 * including the software descriptor
3028 * rx_desc and assert.
3029 */
3030 ring_desc = rxdma_dst_ring_desc;
3031 if (qdf_unlikely(!rx_desc->in_use)) {
3032 dup_desc_dbg(soc,
3033 ring_desc,
3034 rx_desc);
3035 continue;
3036 }
3037
3038 if (rx_desc->unmapped == 0) {
3039 rx_desc_pool =
3040 &soc->rx_desc_buf[rx_desc->pool_id];
3041 dp_ipa_rx_buf_smmu_mapping_lock(soc);
3042 dp_rx_nbuf_unmap_pool(soc,
3043 rx_desc_pool,
3044 msdu);
3045 rx_desc->unmapped = 1;
3046 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3047 }
3048
3049 dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
3050 soc, msdu);
3051
3052 dp_rx_buffer_pool_nbuf_free(soc, msdu,
3053 rx_desc->pool_id);
3054 rx_bufs_used++;
3055 dp_rx_add_to_free_desc_list(head,
3056 tail, rx_desc);
3057 }
3058 }
3059 } else {
3060 rxdma_error_code = HAL_RXDMA_ERR_WAR;
3061 }
3062
3063 /*
3064 * Store the current link buffer into the local structure
3065 * to be used for release purpose.
3066 */
3067 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3068 buf_info.paddr, buf_info.sw_cookie,
3069 buf_info.rbm);
3070
3071 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3072 &buf_info);
3073 dp_rx_link_desc_return_by_addr(soc,
3074 (hal_buff_addrinfo_t)
3075 rx_link_buf_info,
3076 bm_action);
3077 } while (buf_info.paddr);
3078
3079 DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
3080 if (pdev)
3081 DP_STATS_INC(pdev, err.rxdma_error, 1);
3082
3083 if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
3084 dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
3085 }
3086
3087 return rx_bufs_used;
3088 }
3089
3090 uint32_t
3091 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3092 uint32_t mac_id, uint32_t quota)
3093 {
3094 struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
3095 hal_rxdma_desc_t rxdma_dst_ring_desc;
3096 hal_soc_handle_t hal_soc;
3097 void *err_dst_srng;
3098 union dp_rx_desc_list_elem_t *head = NULL;
3099 union dp_rx_desc_list_elem_t *tail = NULL;
3100 struct dp_srng *dp_rxdma_srng;
3101 struct rx_desc_pool *rx_desc_pool;
3102 uint32_t work_done = 0;
3103 uint32_t rx_bufs_used = 0;
3104
3105 if (!pdev)
3106 return 0;
3107
3108 err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;
3109
3110 if (!err_dst_srng) {
3111 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3112 soc, err_dst_srng);
3113 return 0;
3114 }
3115
3116 hal_soc = soc->hal_soc;
3117
3118 qdf_assert(hal_soc);
3119
3120 if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
3121 dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
3122 soc, err_dst_srng);
3123 return 0;
3124 }
3125
3126 while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
3127 hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
3128
3129 rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
3130 rxdma_dst_ring_desc,
3131 &head, &tail);
3132 }
3133
3134 dp_srng_access_end(int_ctx, soc, err_dst_srng);
3135
3136 if (rx_bufs_used) {
3137 if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3138 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3139 rx_desc_pool = &soc->rx_desc_buf[mac_id];
3140 } else {
3141 dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
3142 rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
3143 }
3144
3145 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3146 rx_desc_pool, rx_bufs_used, &head, &tail, false);
3147
3148 work_done += rx_bufs_used;
3149 }
3150
3151 return work_done;
3152 }
3153
3154 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3155
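/**
 * dp_wbm_int_err_mpdu_pop() - walk the MSDU link descriptors of an MPDU
 *                             that hit a WBM internal error and free them
 * @soc: core DP main context
 * @mac_id: lmac id
 * @rxdma_dst_ring_desc: link descriptor buf addr info
 * @head: head of the rx desc list to be freed
 * @tail: tail of the rx desc list to be freed
 * @rx_bufs_used: per-pool count of reaped buffers, updated here
 *
 * Return: None
 */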
3156 static inline void
3157 dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
3158 hal_rxdma_desc_t rxdma_dst_ring_desc,
3159 union dp_rx_desc_list_elem_t **head,
3160 union dp_rx_desc_list_elem_t **tail,
3161 uint32_t *rx_bufs_used)
3162 {
3163 void *rx_msdu_link_desc;
3164 qdf_nbuf_t msdu;
3165 qdf_nbuf_t last;
3166 struct hal_rx_msdu_list msdu_list;
3167 uint16_t num_msdus;
3168 struct hal_buf_info buf_info;
3169 uint32_t msdu_cnt, i;
3170 uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
3171 struct rx_desc_pool *rx_desc_pool;
3172 struct dp_rx_desc *rx_desc;
3173
3174 msdu = 0;
3175
3176 last = NULL;
3177
3178 hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
3179 &buf_info, &msdu_cnt);
3180
3181 do {
3182 rx_msdu_link_desc =
3183 dp_rx_cookie_2_link_desc_va(soc, &buf_info);
3184
3185 if (!rx_msdu_link_desc) {
3186 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
3187 break;
3188 }
3189
3190 hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
3191 &msdu_list, &num_msdus);
3192
3193 if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
3194 for (i = 0; i < num_msdus; i++) {
3195 if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
3196 dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
3197 msdu_list.sw_cookie[i]);
3198 continue;
3199 }
3200
3201 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3202 soc,
3203 msdu_list.sw_cookie[i]);
3204 qdf_assert_always(rx_desc);
3205 rx_desc_pool =
3206 &soc->rx_desc_buf[rx_desc->pool_id];
3207 msdu = rx_desc->nbuf;
3208
3209 /*
3210 * This is an unlikely scenario where the host is reaping
3211 * a descriptor which it already reaped just a while ago
3212 * but is yet to replenish it back to HW.
3213 */
3214 if (qdf_unlikely(!rx_desc->in_use) ||
3215 qdf_unlikely(!msdu)) {
3216 dp_rx_err_info_rl("Reaping rx_desc not in use!");
3217 continue;
3218 }
3219
3220 dp_ipa_rx_buf_smmu_mapping_lock(soc);
3221 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
3222 rx_desc->unmapped = 1;
3223 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3224
3225 dp_rx_buffer_pool_nbuf_free(soc, msdu,
3226 rx_desc->pool_id);
3227 rx_bufs_used[rx_desc->pool_id]++;
3228 dp_rx_add_to_free_desc_list(head,
3229 tail, rx_desc);
3230 }
3231 }
3232
3233 /*
3234 * Store the current link buffer into the local structure
3235 * to be used for release purpose.
3236 */
3237 hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
3238 buf_info.paddr, buf_info.sw_cookie,
3239 buf_info.rbm);
3240
3241 hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
3242 &buf_info);
3243 dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
3244 rx_link_buf_info,
3245 HAL_BM_ACTION_PUT_IN_IDLE_LIST);
3246 } while (buf_info.paddr);
3247 }
3248
3249 void
3250 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
3251 uint32_t buf_type)
3252 {
3253 struct hal_buf_info buf_info = {0};
3254 struct dp_rx_desc *rx_desc = NULL;
3255 struct rx_desc_pool *rx_desc_pool;
3256 uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
3257 union dp_rx_desc_list_elem_t *head = NULL;
3258 union dp_rx_desc_list_elem_t *tail = NULL;
3259 uint8_t pool_id;
3260 uint8_t mac_id;
3261
3262 hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);
3263
3264 if (!buf_info.paddr) {
3265 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
3266 return;
3267 }
3268
3269 /* buffer_addr_info is the first element of ring_desc */
3270 hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
3271 &buf_info);
3272
3273 if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
3274 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
3275 rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
3276 soc,
3277 buf_info.sw_cookie);
3278
3279 if (rx_desc && rx_desc->nbuf) {
3280 rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3281 dp_ipa_rx_buf_smmu_mapping_lock(soc);
3282 dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
3283 rx_desc->nbuf);
3284 rx_desc->unmapped = 1;
3285 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
3286
3287 dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
3288 rx_desc->pool_id);
3289 dp_rx_add_to_free_desc_list(&head,
3290 &tail,
3291 rx_desc);
3292
3293 rx_bufs_reaped[rx_desc->pool_id]++;
3294 }
3295 } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
3296 pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);
3297
3298 dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
3299 &head, &tail, rx_bufs_reaped);
3300 }
3301
3302 for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
3303 struct rx_desc_pool *rx_desc_pool;
3304 struct dp_srng *dp_rxdma_srng;
3305
3306 if (!rx_bufs_reaped[mac_id])
3307 continue;
3308
3309 DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
3310 dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
3311 rx_desc_pool = &soc->rx_desc_buf[mac_id];
3312
3313 dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
3314 rx_desc_pool,
3315 rx_bufs_reaped[mac_id],
3316 &head, &tail, false);
3317 }
3318 }
3319
3320 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3321