Lines Matching refs:txrx_peer

124 struct dp_txrx_peer *txrx_peer, in dp_rx_mec_check_wrapper() argument
128 return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf); in dp_rx_mec_check_wrapper()
132 struct dp_txrx_peer *txrx_peer, in dp_rx_mec_check_wrapper() argument
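
The wrapper above guards RX delivery with a multicast echo check, and the second definition at source line 132 reads like a build-time variant of the same function. Below is a minimal standalone sketch of the idea as I read it: drop multicast frames whose source address is one of our own, i.e. frames we transmitted that were echoed back. The single-address table and helper names are illustrative stand-ins, not the real dp_rx_mcast_echo_check() internals.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Stand-in for the device's own MAC (the real check consults a MEC table). */
    static const unsigned char self_mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };

    /* Returns true if the frame's source address is ours, i.e. an echo. */
    static bool mec_check(const unsigned char *src_addr)
    {
        return memcmp(src_addr, self_mac, ETH_ALEN) == 0;
    }

    int main(void)
    {
        const unsigned char echoed[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
        const unsigned char other[ETH_ALEN]  = { 0x02, 0, 0, 0, 0, 0x02 };

        printf("echoed frame dropped: %s\n", mec_check(echoed) ? "yes" : "no");
        printf("other frame dropped:  %s\n", mec_check(other) ? "yes" : "no");
        return 0;
    }
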
347 struct dp_txrx_peer *txrx_peer, in dp_rx_deliver_to_osif_stack_rh() argument
353 dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL); in dp_rx_deliver_to_osif_stack_rh()
355 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL); in dp_rx_deliver_to_osif_stack_rh()
361 struct dp_txrx_peer *txrx_peer, in dp_rx_deliver_to_osif_stack_rh() argument
366 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL); in dp_rx_deliver_to_osif_stack_rh()
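
dp_rx_deliver_to_osif_stack_rh() also appears twice, which is consistent with two compile-time variants: one that diverts EAPOL frames to a dedicated delivery path (source line 353) before the common dp_rx_deliver_to_stack() call, and one that hands everything straight to the stack. A hedged sketch of that shape; the config macro and helper names below are stand-ins, not the real feature flag or API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative config switch, not the real feature flag. */
    #define SKETCH_EAPOL_DELIVERY 1

    static void deliver_to_stack(const char *frame)
    {
        printf("stack: %s\n", frame);
    }

    static void eapol_deliver_to_stack(const char *frame)
    {
        printf("eapol path: %s\n", frame);
    }

    #if SKETCH_EAPOL_DELIVERY
    /* Variant that special-cases EAPOL, as at source line 353. */
    static void deliver_to_osif_stack(const char *frame, bool eapol)
    {
        if (eapol)
            eapol_deliver_to_stack(frame);
        else
            deliver_to_stack(frame);
    }
    #else
    /* Fallback variant: everything goes straight to the stack. */
    static void deliver_to_osif_stack(const char *frame, bool eapol)
    {
        (void)eapol;
        deliver_to_stack(frame);
    }
    #endif

    int main(void)
    {
        deliver_to_osif_stack("EAPOL M1", true);
        deliver_to_osif_stack("data", false);
        return 0;
    }
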
377 struct dp_txrx_peer *txrx_peer = NULL; in dp_rx_decrypt_unecrypt_err_handler_rh() local
408 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, in dp_rx_decrypt_unecrypt_err_handler_rh()
411 if (!txrx_peer) { in dp_rx_decrypt_unecrypt_err_handler_rh()
439 vdev = txrx_peer->vdev; in dp_rx_decrypt_unecrypt_err_handler_rh()
473 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, in dp_rx_decrypt_unecrypt_err_handler_rh()
476 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, in dp_rx_decrypt_unecrypt_err_handler_rh()
480 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1, in dp_rx_decrypt_unecrypt_err_handler_rh()
486 dp_rx_deliver_raw(vdev, nbuf, txrx_peer, 0); in dp_rx_decrypt_unecrypt_err_handler_rh()
493 DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1); in dp_rx_decrypt_unecrypt_err_handler_rh()
495 dp_rx_deliver_to_osif_stack_rh(soc, vdev, txrx_peer, nbuf, NULL, in dp_rx_decrypt_unecrypt_err_handler_rh()
500 if (txrx_peer) in dp_rx_decrypt_unecrypt_err_handler_rh()
505 if (txrx_peer) in dp_rx_decrypt_unecrypt_err_handler_rh()
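
The dp_rx_decrypt_unecrypt_err_handler_rh() references trace a lifetime pattern that recurs in every handler in this listing: acquire the peer by peer_id via dp_tgt_txrx_peer_get_ref_by_id(), bail out if the lookup fails, use txrx_peer->vdev for stats and delivery, and release the reference on each exit path (the paired checks at source lines 500 and 505). A self-contained sketch of that pattern, with simplified stand-in types and helpers rather than the real driver API:

    #include <stdio.h>

    struct dp_vdev { int id; };
    struct dp_txrx_peer { int peer_id; int refcnt; struct dp_vdev *vdev; };

    static struct dp_vdev g_vdev = { .id = 1 };
    static struct dp_txrx_peer g_peer = { .peer_id = 7, .refcnt = 0,
                                          .vdev = &g_vdev };

    /* Stand-in for dp_tgt_txrx_peer_get_ref_by_id(). */
    static struct dp_txrx_peer *peer_get_ref(int peer_id)
    {
        if (peer_id != g_peer.peer_id)
            return NULL;
        g_peer.refcnt++;
        return &g_peer;
    }

    /* Stand-in for the matching unref helper. */
    static void peer_unref(struct dp_txrx_peer *peer)
    {
        peer->refcnt--;
    }

    static void err_handler(int peer_id)
    {
        struct dp_txrx_peer *txrx_peer = peer_get_ref(peer_id);

        if (!txrx_peer) {
            printf("peer %d not found, drop\n", peer_id);
            return;                /* no ref taken, nothing to release */
        }

        printf("deliver via vdev %d\n", txrx_peer->vdev->id);
        peer_unref(txrx_peer);     /* every path that took a ref releases it */
    }

    int main(void)
    {
        err_handler(7);            /* known peer: deliver and unref */
        err_handler(8);            /* unknown peer: drop, no ref to release */
        return 0;
    }
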
515 struct dp_txrx_peer *txrx_peer = NULL; in dp_rx_2k_jump_oor_err_handler_rh() local
528 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, in dp_rx_2k_jump_oor_err_handler_rh()
531 if (!txrx_peer) { in dp_rx_2k_jump_oor_err_handler_rh()
543 if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask, in dp_rx_2k_jump_oor_err_handler_rh()
556 if (txrx_peer) in dp_rx_2k_jump_oor_err_handler_rh()
572 struct dp_txrx_peer *txrx_peer = NULL; in dp_rx_mic_err_handler_rh() local
591 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, in dp_rx_mic_err_handler_rh()
594 if (!txrx_peer) { in dp_rx_mic_err_handler_rh()
599 vdev = txrx_peer->vdev; in dp_rx_mic_err_handler_rh()
623 status = dp_rx_defrag_add_last_frag(soc, txrx_peer, in dp_rx_mic_err_handler_rh()
627 if (txrx_peer) in dp_rx_mic_err_handler_rh()
667 if (txrx_peer) in dp_rx_mic_err_handler_rh()
723 struct dp_txrx_peer *txrx_peer; in dp_rx_data_indication_handler() local
772 txrx_peer = NULL; in dp_rx_data_indication_handler()
967 if (qdf_likely(txrx_peer)) in dp_rx_data_indication_handler()
1002 if (qdf_unlikely(!txrx_peer)) { in dp_rx_data_indication_handler()
1003 txrx_peer = in dp_rx_data_indication_handler()
1010 if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) { in dp_rx_data_indication_handler()
1014 } else if (txrx_peer && txrx_peer->peer_id != peer_id) { in dp_rx_data_indication_handler()
1018 txrx_peer = in dp_rx_data_indication_handler()
1025 if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) { in dp_rx_data_indication_handler()
1031 if (txrx_peer) { in dp_rx_data_indication_handler()
1134 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, in dp_rx_data_indication_handler()
1158 if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) { in dp_rx_data_indication_handler()
1160 DP_PEER_PER_PKT_STATS_INC(txrx_peer, in dp_rx_data_indication_handler()
1173 if (qdf_likely(txrx_peer) && in dp_rx_data_indication_handler()
1174 qdf_unlikely(!txrx_peer->authorize) && in dp_rx_data_indication_handler()
1180 DP_PEER_PER_PKT_STATS_INC(txrx_peer, in dp_rx_data_indication_handler()
1192 dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, in dp_rx_data_indication_handler()
1215 txrx_peer, in dp_rx_data_indication_handler()
1219 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, in dp_rx_data_indication_handler()
1231 txrx_peer, in dp_rx_data_indication_handler()
1237 if (dp_rx_intrabss_fwd_rh(soc, txrx_peer, in dp_rx_data_indication_handler()
1252 dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY, in dp_rx_data_indication_handler()
1258 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1, in dp_rx_data_indication_handler()
1260 if (qdf_unlikely(txrx_peer->in_twt)) in dp_rx_data_indication_handler()
1261 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, in dp_rx_data_indication_handler()
1270 DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id, in dp_rx_data_indication_handler()
1275 if (qdf_likely(txrx_peer)) in dp_rx_data_indication_handler()
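
Inside dp_rx_data_indication_handler(), the references around source lines 1002-1025 suggest the peer reference is cached across the MSDUs of one indication: a lookup happens only for the first MSDU, or when the peer_id changes mid-batch, in which case the old reference is swapped for a new one. A standalone sketch of that caching pattern, counting lookups to make the saving visible; the helpers stand in for dp_txrx_peer_get_ref_by_id() and its unref counterpart.

    #include <stddef.h>
    #include <stdio.h>

    struct dp_txrx_peer { int peer_id; };

    static struct dp_txrx_peer peers[] = { { 1 }, { 2 } };
    static int lookups;

    /* Stand-in for the by-id lookup that also takes a reference. */
    static struct dp_txrx_peer *peer_get_ref(int peer_id)
    {
        lookups++;
        for (size_t i = 0; i < sizeof(peers) / sizeof(peers[0]); i++)
            if (peers[i].peer_id == peer_id)
                return &peers[i];
        return NULL;
    }

    static void peer_unref(struct dp_txrx_peer *peer) { (void)peer; }

    int main(void)
    {
        /* peer_ids of the MSDUs in one indication, mostly from one peer */
        const int msdu_peer[] = { 1, 1, 1, 2, 2, 1 };
        const size_t n = sizeof(msdu_peer) / sizeof(msdu_peer[0]);
        struct dp_txrx_peer *txrx_peer = NULL;

        for (size_t i = 0; i < n; i++) {
            int peer_id = msdu_peer[i];

            if (!txrx_peer) {                       /* first MSDU: take a ref */
                txrx_peer = peer_get_ref(peer_id);
            } else if (txrx_peer->peer_id != peer_id) {
                peer_unref(txrx_peer);              /* peer changed: swap refs */
                txrx_peer = peer_get_ref(peer_id);
            }
            if (!txrx_peer)
                continue;                           /* no peer: drop this MSDU */

            printf("MSDU %zu -> peer %d\n", i, txrx_peer->peer_id);
        }
        if (txrx_peer)
            peer_unref(txrx_peer);                  /* release the last ref */

        printf("%d lookups for %zu MSDUs\n", lookups, n);
        return 0;
    }
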
1298 static inline void dp_rx_defrag_deliver_rh(struct dp_txrx_peer *txrx_peer, in dp_rx_defrag_deliver_rh() argument
1302 struct dp_vdev *vdev = txrx_peer->vdev; in dp_rx_defrag_deliver_rh()
1316 dp_rx_deliver_to_stack(soc, vdev, txrx_peer, deliver_list_head, in dp_rx_defrag_deliver_rh()
1325 struct dp_txrx_peer *txrx_peer = NULL; in dp_rx_defrag_store_fragment_rh() local
1350 txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &txrx_ref_handle, in dp_rx_defrag_store_fragment_rh()
1353 if (!txrx_peer) { in dp_rx_defrag_store_fragment_rh()
1404 pdev = txrx_peer->vdev->pdev; in dp_rx_defrag_store_fragment_rh()
1405 rx_tid = &txrx_peer->rx_tid[tid]; in dp_rx_defrag_store_fragment_rh()
1408 rx_reorder_array_elem = txrx_peer->rx_tid[tid].array; in dp_rx_defrag_store_fragment_rh()
1411 txrx_peer); in dp_rx_defrag_store_fragment_rh()
1444 dp_rx_reorder_flush_frag(txrx_peer, tid); in dp_rx_defrag_store_fragment_rh()
1465 dp_rx_defrag_cleanup(txrx_peer, tid); in dp_rx_defrag_store_fragment_rh()
1473 status = dp_rx_defrag_fraglist_insert(txrx_peer, tid, in dp_rx_defrag_store_fragment_rh()
1479 dp_rx_defrag_waitlist_remove(txrx_peer, tid); in dp_rx_defrag_store_fragment_rh()
1486 txrx_peer->rx_tid[tid].defrag_timeout_ms = in dp_rx_defrag_store_fragment_rh()
1490 dp_rx_defrag_waitlist_add(txrx_peer, tid); in dp_rx_defrag_store_fragment_rh()
1500 status = dp_rx_defrag(txrx_peer, tid, rx_reorder_array_elem->head, in dp_rx_defrag_store_fragment_rh()
1505 dp_rx_defrag_cleanup(txrx_peer, tid); in dp_rx_defrag_store_fragment_rh()
1510 dp_rx_defrag_deliver_rh(txrx_peer, tid, rx_reorder_array_elem->head); in dp_rx_defrag_store_fragment_rh()
1513 dp_rx_defrag_cleanup(txrx_peer, tid); in dp_rx_defrag_store_fragment_rh()
1523 if (txrx_peer) in dp_rx_defrag_store_fragment_rh()
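
Finally, the dp_rx_defrag_store_fragment_rh() references outline the per-TID defragmentation flow: insert the fragment into the TID's reorder slot, park the TID on the defrag waitlist with a defrag_timeout_ms deadline until the final fragment arrives, then reassemble, deliver via dp_rx_defrag_deliver_rh(), and clean up. A simplified standalone sketch of that flow; all structures and helpers are stand-ins for the real rx_tid state and dp_rx_defrag_* calls.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_FRAGS 8

    struct rx_tid {
        char frags[MAX_FRAGS][16];
        int nfrags;
        int defrag_timeout_ms;   /* mirrors rx_tid[tid].defrag_timeout_ms */
        bool on_waitlist;
    };

    static void waitlist_add(struct rx_tid *tid)    { tid->on_waitlist = true; }
    static void waitlist_remove(struct rx_tid *tid) { tid->on_waitlist = false; }

    static void defrag_cleanup(struct rx_tid *tid)
    {
        tid->nfrags = 0;
        tid->defrag_timeout_ms = 0;
    }

    static void store_fragment(struct rx_tid *tid, const char *frag, bool last,
                               int now_ms)
    {
        if (tid->nfrags == MAX_FRAGS)
            return;              /* table full: drop (the real code can flush) */
        strncpy(tid->frags[tid->nfrags++], frag, 15);

        if (!last) {
            /* more fragments expected: arm the flush timeout and wait */
            tid->defrag_timeout_ms = now_ms + 100;
            waitlist_add(tid);
            return;
        }

        waitlist_remove(tid);

        /* "defrag": reassemble the fragments and deliver the whole MSDU */
        char msdu[MAX_FRAGS * 16] = "";
        for (int i = 0; i < tid->nfrags; i++)
            strcat(msdu, tid->frags[i]);
        printf("deliver: %s\n", msdu);

        defrag_cleanup(tid);
    }

    int main(void)
    {
        struct rx_tid tid = { 0 };

        store_fragment(&tid, "frag0-", false, 0);
        store_fragment(&tid, "frag1-", false, 5);
        store_fragment(&tid, "frag2", true, 9);
        return 0;
    }
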