Lines matching refs: txrx_peer

1125 struct dp_txrx_peer *txrx_peer, uint8_t link_id) in dp_rx_deliver_raw() argument
1138 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1, in dp_rx_deliver_raw()
1308 struct dp_txrx_peer *txrx_peer) in dp_rx_fill_mesh_stats() argument
1342 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH); in dp_rx_fill_mesh_stats()
1359 txrx_peer->peer_id, in dp_rx_fill_mesh_stats()
2036 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) in dp_rx_deliver_to_stack_ext() argument
2051 if (!txrx_peer->wds_ext.init) in dp_rx_deliver_to_stack_ext()
2054 if (txrx_peer->osif_rx) in dp_rx_deliver_to_stack_ext()
2055 txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head); in dp_rx_deliver_to_stack_ext()
2065 struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head) in dp_rx_deliver_to_stack_ext() argument
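
Note: the WDS-ext branch above (lines 2051-2055) gates delivery on wds_ext.init and the registered osif_rx callback. A minimal sketch of that gate; the types and names here (wds_ext_stub, osif_rx_cb) are illustrative stand-ins, not the driver's own:

#include <stddef.h>

typedef void (*osif_rx_cb)(void *osif_peer, void *nbuf_head);

struct wds_ext_stub {
	int init;            /* set once the OS interface peer is registered */
	void *osif_peer;     /* opaque handle passed back to the callback */
};

/* Deliver through the extension callback only when it is usable;
 * otherwise the caller keeps the frames on the normal path. */
static int deliver_to_stack_ext(struct wds_ext_stub *ext, osif_rx_cb cb,
				void *nbuf_head)
{
	if (!ext->init || !cb)
		return -1;
	cb(ext->osif_peer, nbuf_head);
	return 0;
}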
2085 link_id = dp_rx_get_stats_arr_idx_from_link_id(nbuf, peer->txrx_peer); in dp_set_nbuf_band()
2086 dp_rx_set_nbuf_band(nbuf, peer->txrx_peer, link_id); in dp_set_nbuf_band()
2110 if (!peer->txrx_peer) { in dp_rx_flush_rx_cached()
2116 if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) { in dp_rx_flush_rx_cached()
2117 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); in dp_rx_flush_rx_cached()
2128 bufqi = &peer->txrx_peer->bufq_info; in dp_rx_flush_rx_cached()
2157 qdf_atomic_dec(&peer->txrx_peer->flush_in_progress); in dp_rx_flush_rx_cached()
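
Note: lines 2116-2117 are an inc-and-test reentrancy guard around the cached-queue flush. A minimal sketch, assuming C11 atomics as stand-ins for qdf_atomic_inc_return()/qdf_atomic_dec():

#include <stdatomic.h>
#include <stdbool.h>

struct flush_guard {
	atomic_int flush_in_progress;   /* 0 when no flush is running */
};

/* Only the caller that raises the counter from 0 to 1 may flush;
 * a concurrent caller backs out and reports that a flush is active. */
static bool try_begin_flush(struct flush_guard *g)
{
	if (atomic_fetch_add(&g->flush_in_progress, 1) + 1 > 1) {
		atomic_fetch_sub(&g->flush_in_progress, 1);
		return false;
	}
	return true;
}

static void end_flush(struct flush_guard *g)
{
	atomic_fetch_sub(&g->flush_in_progress, 1);   /* line 2157 analogue */
}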
2170 struct dp_txrx_peer *txrx_peer, in dp_rx_enqueue_rx() argument
2174 struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; in dp_rx_enqueue_rx()
2177 struct dp_soc *soc = txrx_peer->vdev->pdev->soc; in dp_rx_enqueue_rx()
2190 ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, in dp_rx_enqueue_rx()
2195 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, in dp_rx_enqueue_rx()
2203 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, in dp_rx_enqueue_rx()
2211 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, in dp_rx_enqueue_rx()
2225 bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev, in dp_rx_enqueue_rx()
2258 struct dp_txrx_peer *txrx_peer, in dp_rx_enqueue_rx() argument
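
Note: the repeated "bufqi->dropped = dp_rx_drop_nbuf_list(...)" entries (lines 2195-2225) are drop paths out of the per-peer cached queue. A sketch of the admit-or-drop decision; the field names and threshold are assumptions, not the real dp_peer_cached_bufq layout:

#include <stddef.h>
#include <stdint.h>

struct cached_bufq_stub {
	size_t entries;     /* frames currently cached for the peer */
	size_t thresh;      /* cap before new frames are dropped */
	uint64_t dropped;   /* running drop count (bufqi->dropped analogue) */
};

/* Returns 0 if the frame list may be cached, -1 if the caller must
 * free it and account the drop. */
static int bufq_admit(struct cached_bufq_stub *q)
{
	if (q->entries >= q->thresh) {
		q->dropped++;
		return -1;
	}
	q->entries++;
	return 0;
}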
2278 struct dp_txrx_peer *txrx_peer, in dp_rx_check_delivery_to_stack() argument
2282 txrx_peer, nbuf_head))) in dp_rx_check_delivery_to_stack()
2309 struct dp_txrx_peer *txrx_peer, in dp_rx_check_delivery_to_stack() argument
2325 if (txrx_peer) in dp_rx_check_delivery_to_stack()
2326 DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num, in dp_rx_check_delivery_to_stack()
2345 struct dp_txrx_peer *txrx_peer, in dp_rx_validate_rx_callbacks() argument
2366 if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) { in dp_rx_validate_rx_callbacks()
2367 dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head); in dp_rx_validate_rx_callbacks()
2371 DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf, in dp_rx_validate_rx_callbacks()
2383 struct dp_txrx_peer *txrx_peer, in dp_rx_raw_pkt_mld_addr_conv() argument
2393 peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, in dp_rx_raw_pkt_mld_addr_conv()
2421 struct dp_txrx_peer *txrx_peer, in dp_rx_raw_pkt_mld_addr_conv() argument
2428 struct dp_txrx_peer *txrx_peer, in dp_rx_deliver_to_stack() argument
2432 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != in dp_rx_deliver_to_stack()
2438 dp_rx_raw_pkt_mld_addr_conv(soc, vdev, txrx_peer, nbuf_head); in dp_rx_deliver_to_stack()
2443 dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head); in dp_rx_deliver_to_stack()
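
Note: lines 2432-2443 show the call order inside dp_rx_deliver_to_stack(): validate the OS rx callbacks, convert the raw-mode MLD address, then deliver. A compilable outline with stubbed helpers; the real functions take soc/vdev/txrx_peer/nbuf arguments and richer conditions:

#include <stdbool.h>
#include <stddef.h>

static bool validate_rx_callbacks(void *vdev) { return vdev != NULL; } /* stub */
static void raw_pkt_mld_addr_conv(void *nbufs) { (void)nbufs; }        /* stub */
static void check_delivery_to_stack(void *nbufs) { (void)nbufs; }      /* stub */

static void deliver_to_stack(void *vdev, void *nbufs)
{
	if (!validate_rx_callbacks(vdev))
		return;                       /* line 2432 analogue */
	raw_pkt_mld_addr_conv(nbufs);         /* line 2438 analogue */
	check_delivery_to_stack(nbufs);       /* line 2443 analogue */
}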
2451 struct dp_txrx_peer *txrx_peer, in dp_rx_eapol_deliver_to_stack() argument
2455 if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) != in dp_rx_eapol_deliver_to_stack()
2467 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \ argument
2476 txrx_peer_local = txrx_peer; \
2487 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) argument
2513 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, in dp_rx_rates_stats_update() argument
2529 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id); in dp_rx_rates_stats_update()
2544 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id); in dp_rx_rates_stats_update()
2547 txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate, in dp_rx_rates_stats_update()
2549 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id); in dp_rx_rates_stats_update()
2550 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id); in dp_rx_rates_stats_update()
2551 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id); in dp_rx_rates_stats_update()
2552 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id); in dp_rx_rates_stats_update()
2553 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id); in dp_rx_rates_stats_update()
2554 DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id); in dp_rx_rates_stats_update()
2559 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, in dp_rx_rates_stats_update() argument
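
Note: line 2547 feeds the previous avg_rx_rate into the update stored on line 2549, i.e. a running average of the receive rate. One plausible low-pass filter, assuming an EWMA; the driver's own averaging macro may weight differently:

#include <stdint.h>

/* 7/8 old + 1/8 new, seeded with the first sample to avoid a cold start */
static uint32_t rate_lpf(uint32_t avg_kbps, uint32_t sample_kbps)
{
	return avg_kbps ? (avg_kbps - (avg_kbps >> 3)) + (sample_kbps >> 3)
			: sample_kbps;
}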
2582 struct dp_txrx_peer *txrx_peer, in dp_rx_msdu_extd_stats_update() argument
2594 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id); in dp_rx_msdu_extd_stats_update()
2595 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu), in dp_rx_msdu_extd_stats_update()
2625 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1, in dp_rx_msdu_extd_stats_update()
2628 DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, in dp_rx_msdu_extd_stats_update()
2631 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id); in dp_rx_msdu_extd_stats_update()
2637 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id); in dp_rx_msdu_extd_stats_update()
2639 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id); in dp_rx_msdu_extd_stats_update()
2640 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1, in dp_rx_msdu_extd_stats_update()
2643 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1, in dp_rx_msdu_extd_stats_update()
2647 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1, in dp_rx_msdu_extd_stats_update()
2649 DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1, in dp_rx_msdu_extd_stats_update()
2654 DP_PEER_EXTD_STATS_INC(txrx_peer, in dp_rx_msdu_extd_stats_update()
2658 dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer, in dp_rx_msdu_extd_stats_update()
2665 struct dp_txrx_peer *txrx_peer, in dp_rx_msdu_extd_stats_update() argument
2673 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, in dp_peer_update_rx_pkt_per_lmac() argument
2682 if (qdf_likely(txrx_peer)) in dp_peer_update_rx_pkt_per_lmac()
2683 dp_err_rl("peer_id: %u", txrx_peer->peer_id); in dp_peer_update_rx_pkt_per_lmac()
2689 DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1, in dp_peer_update_rx_pkt_per_lmac()
2691 txrx_peer->is_mld_peer, link_id); in dp_peer_update_rx_pkt_per_lmac()
2695 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, in dp_peer_update_rx_pkt_per_lmac() argument
2703 struct dp_txrx_peer *txrx_peer, in dp_rx_msdu_stats_update() argument
2709 struct dp_vdev *vdev = txrx_peer->vdev; in dp_rx_msdu_stats_update()
2714 dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); in dp_rx_msdu_stats_update()
2717 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, in dp_rx_msdu_stats_update()
2719 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, in dp_rx_msdu_stats_update()
2721 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, in dp_rx_msdu_stats_update()
2723 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1, in dp_rx_msdu_stats_update()
2725 dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id); in dp_rx_msdu_stats_update()
2731 DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id); in dp_rx_msdu_stats_update()
2734 DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, in dp_rx_msdu_stats_update()
2739 DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len, in dp_rx_msdu_stats_update()
2743 txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts = in dp_rx_msdu_stats_update()
2747 txrx_peer, link_id); in dp_rx_msdu_stats_update()
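
Note: lines 2731-2739 split received packets/bytes into multicast, broadcast, and unicast buckets (DP_PEER_MC/BC/UC_INCC_PKT). The classification itself reduces to the destination address, as in this sketch; the counter struct is illustrative:

#include <stdbool.h>
#include <stdint.h>

struct rx_addr_counters {
	uint64_t mcast, bcast, ucast;
};

static bool is_bcast(const uint8_t mac[6])
{
	for (int i = 0; i < 6; i++)
		if (mac[i] != 0xff)
			return false;
	return true;
}

static void count_rx_frame(struct rx_addr_counters *c, const uint8_t da[6])
{
	if (da[0] & 0x01) {          /* group bit: multicast family */
		if (is_bcast(da))
			c->bcast++;  /* DP_PEER_BC_INCC_PKT analogue */
		else
			c->mcast++;  /* DP_PEER_MC_INCC_PKT analogue */
	} else {
		c->ucast++;          /* DP_PEER_UC_INCC_PKT analogue */
	}
}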
2753 struct dp_txrx_peer *txrx_peer) in dp_wds_rx_policy_check() argument
2817 struct dp_txrx_peer *txrx_peer; in dp_rx_nbuf_band_set() local
2827 txrx_peer = dp_get_txrx_peer(peer); in dp_rx_nbuf_band_set()
2828 if (qdf_likely(txrx_peer)) { in dp_rx_nbuf_band_set()
2830 qdf_nbuf_rx_set_band(nbuf, txrx_peer->ll_band[link_id]); in dp_rx_nbuf_band_set()
2909 if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer && in dp_rx_deliver_to_stack_no_peer()
2914 dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) { in dp_rx_deliver_to_stack_no_peer()
3474 struct dp_txrx_peer *txrx_peer, in dp_rx_deliver_special_frame() argument
3497 if (txrx_peer->vdev) { in dp_rx_deliver_special_frame()
3498 dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf, in dp_rx_deliver_special_frame()
3506 dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, in dp_rx_deliver_special_frame()
3516 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf, in dp_rx_multipass_process() argument
3521 if (qdf_unlikely(!txrx_peer->vlan_id)) in dp_rx_multipass_process()
3536 (txrx_peer->vlan_id & VLAN_VID_MASK)); in dp_rx_multipass_process()
3539 dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf); in dp_rx_multipass_process()
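
Note: dp_rx_multipass_process() (lines 3516-3539) compares the frame's VLAN ID against the peer's and strips the tag on a match. A sketch of the gate, assuming untagged peers (vlan_id == 0, line 3521) pass through unmodified:

#include <stdbool.h>
#include <stdint.h>

#define VLAN_VID_MASK 0x0fff   /* low 12 bits of the 802.1Q TCI */

static bool multipass_accept(uint16_t peer_vlan_id, uint16_t frame_tci)
{
	if (!peer_vlan_id)
		return true;   /* peer not in a VLAN group */
	/* on a match the real code also removes the tag (line 3539) */
	return (frame_tci & VLAN_VID_MASK) == (peer_vlan_id & VLAN_VID_MASK);
}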