Lines Matching refs:txrx_peer

2503 struct dp_txrx_peer *txrx_peer, in dp_tx_latency_stats_update() argument
2511 struct dp_vdev *vdev = txrx_peer->vdev; in dp_tx_latency_stats_update()
2532 tx_latency = &txrx_peer->stats[link_id].tx_latency; in dp_tx_latency_stats_update()
2534 dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx, in dp_tx_latency_stats_update()
2537 dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx, in dp_tx_latency_stats_update()
2540 dp_tx_latency_stats_update_bucket(txrx_peer->vdev, tx_latency, idx, in dp_tx_latency_stats_update()
2608 struct dp_txrx_peer *txrx_peer; in dp_tx_latency_stats_update_cca() local
2636 txrx_peer = dp_get_txrx_peer(peer); in dp_tx_latency_stats_update_cca()
2637 if (qdf_unlikely(!txrx_peer)) { in dp_tx_latency_stats_update_cca()
2644 if (link_id >= txrx_peer->stats_arr_size) in dp_tx_latency_stats_update_cca()
2647 tx_latency = &txrx_peer->stats[link_id].tx_latency; in dp_tx_latency_stats_update_cca()
2680 struct dp_txrx_peer *txrx_peer; in dp_tx_latency_stats_get_per_peer() local
2697 txrx_peer = dp_get_txrx_peer(peer); in dp_tx_latency_stats_get_per_peer()
2698 if (!txrx_peer) in dp_tx_latency_stats_get_per_peer()
2702 if (link_id >= txrx_peer->stats_arr_size) in dp_tx_latency_stats_get_per_peer()
2705 tx_latency = &txrx_peer->stats[link_id].tx_latency; in dp_tx_latency_stats_get_per_peer()
2745 struct dp_txrx_peer *txrx_peer; in dp_tx_latency_stats_get_peer_iter() local
2754 txrx_peer = dp_get_txrx_peer(peer); in dp_tx_latency_stats_get_peer_iter()
2755 if (!txrx_peer) in dp_tx_latency_stats_get_peer_iter()
2849 struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer); in dp_tx_latency_stats_clear_per_peer() local
2851 if (!txrx_peer) { in dp_tx_latency_stats_clear_per_peer()
2856 for (link_id = 0; link_id < txrx_peer->stats_arr_size; link_id++) { in dp_tx_latency_stats_clear_per_peer()
2857 tx_latency = &txrx_peer->stats[link_id].tx_latency; in dp_tx_latency_stats_clear_per_peer()
3008 struct dp_txrx_peer *txrx_peer, in dp_tx_latency_stats_update() argument
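Every latency-stats match above follows the same access pattern: resolve the txrx_peer from the peer handle via dp_get_txrx_peer(), bail out on NULL, bounds-check link_id against stats_arr_size, and only then index stats[link_id].tx_latency. Below is a minimal self-contained sketch of that pattern; the stand-in struct layouts are simplified assumptions (the real definitions in dp_types.h carry many more fields). The NULL check matters because the peer's txrx context can go away while completions are still in flight, and the bounds check keeps a stale link_id from indexing past the per-link array.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; the real layouts in dp_types.h are far richer. */
struct dp_tx_latency { uint64_t accum_us; uint32_t msdu_cnt; };
struct dp_peer_link_stats { struct dp_tx_latency tx_latency; };
struct dp_txrx_peer {
	uint8_t stats_arr_size;            /* number of per-link slots */
	struct dp_peer_link_stats *stats;  /* indexed by link_id */
};

/* The pattern shared by the latency functions above: NULL-check the
 * txrx_peer, bounds-check link_id, only then index stats[]. */
static struct dp_tx_latency *
peer_tx_latency(struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
	if (!txrx_peer)           /* peer context may already be torn down */
		return NULL;
	if (link_id >= txrx_peer->stats_arr_size)  /* stale/bogus link id */
		return NULL;
	return &txrx_peer->stats[link_id].tx_latency;
}

int main(void)
{
	struct dp_peer_link_stats links[2] = { { { 0, 0 } } };
	struct dp_txrx_peer peer = { .stats_arr_size = 2, .stats = links };
	struct dp_tx_latency *lat = peer_tx_latency(&peer, 1);

	if (lat) {
		lat->accum_us += 120;   /* fold in one 120 us completion */
		lat->msdu_cnt++;
	}
	printf("link 1: %u msdus, %llu us\n",
	       links[1].tx_latency.msdu_cnt,
	       (unsigned long long)links[1].tx_latency.accum_us);
	/* an out-of-range link id is rejected, not dereferenced */
	return peer_tx_latency(&peer, 7) ? 1 : 0;
}
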
3862 struct dp_txrx_peer *txrx_peer; in dp_tx_nawds_handler() local
3873 txrx_peer = dp_get_txrx_peer(peer); in dp_tx_nawds_handler()
3874 if (!txrx_peer) in dp_tx_nawds_handler()
3877 if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) { in dp_tx_nawds_handler()
3886 if (dp_peer_is_wds_ext_peer(txrx_peer)) in dp_tx_nawds_handler()
3892 if (sa_peer_id == txrx_peer->peer_id) { in dp_tx_nawds_handler()
3894 DP_PEER_PER_PKT_STATS_INC(txrx_peer, in dp_tx_nawds_handler()
3918 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, in dp_tx_nawds_handler()
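dp_tx_nawds_handler() bumps its counters through DP_PEER_PER_PKT_STATS_INC and DP_PEER_PER_PKT_STATS_INC_PKT, both taking a trailing link_id. A rough sketch of the shape those macros reduce to follows, using simplified stand-in types; the listing itself confirms the real access path txrx_peer->stats[link_id].per_pkt_stats (line 5392), but the exact expansions in dp_peer.h add config and validity guards not shown here.

#include <stdint.h>
#include <stdio.h>

/* Stand-in layout mirroring the path seen at line 5392. */
struct pkt_info { uint32_t num; uint64_t bytes; };
struct tx_per_pkt {
	uint32_t nawds_mcast_drop;
	struct pkt_info nawds_mcast;
};
struct per_pkt { struct tx_per_pkt tx; };
struct link_stats { struct per_pkt per_pkt_stats; };
struct dp_txrx_peer { struct link_stats stats[2]; };

/* Assumed expansions: _INC bumps one scalar field, _INC_PKT bumps the
 * num/bytes pair of a packet-info field, both per link. */
#define PEER_PER_PKT_STATS_INC(_p, _f, _d, _link) \
	((_p)->stats[(_link)].per_pkt_stats._f += (_d))
#define PEER_PER_PKT_STATS_INC_PKT(_p, _f, _n, _b, _link)            \
	do {                                                         \
		PEER_PER_PKT_STATS_INC(_p, _f.num, (_n), (_link));   \
		PEER_PER_PKT_STATS_INC(_p, _f.bytes, (_b), (_link)); \
	} while (0)

int main(void)
{
	struct dp_txrx_peer peer = { 0 };

	/* mirrors the two call sites in dp_tx_nawds_handler() above */
	PEER_PER_PKT_STATS_INC(&peer, tx.nawds_mcast_drop, 1, 0);
	PEER_PER_PKT_STATS_INC_PKT(&peer, tx.nawds_mcast, 1, 1500, 0);

	printf("drops=%u mcast=%u pkts/%llu bytes\n",
	       peer.stats[0].per_pkt_stats.tx.nawds_mcast_drop,
	       peer.stats[0].per_pkt_stats.tx.nawds_mcast.num,
	       (unsigned long long)peer.stats[0].per_pkt_stats.tx.nawds_mcast.bytes);
	return 0;
}
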
4590 struct dp_txrx_peer *txrx_peer; local
4613 txrx_peer = dp_get_txrx_peer(peer);
4615 if (!txrx_peer || txrx_peer->bss_peer)
4624 if (!txrx_peer->wds_enabled ||
4625 !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
4638 txrx_peer = dp_get_txrx_peer(peer);
4639 if (!txrx_peer)
4642 if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
4651 ((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
4652 (txrx_peer->wds_enabled &&
4653 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
4655 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
4657 (txrx_peer->bss_peer &&
4748 struct dp_txrx_peer *txrx_peer, argument
4753 dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
4770 struct dp_txrx_peer *txrx_peer, argument
4864 static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer, argument
4869 struct dp_pdev *pdev = txrx_peer->vdev->pdev;
4878 if (!txrx_peer->delay_stats)
4882 delay_stats = txrx_peer->delay_stats;
4891 tx_desc, ts, txrx_peer->vdev);
4895 void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer, argument
5049 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer, argument
5054 struct dp_pdev *pdev = txrx_peer->vdev->pdev;
5063 if (!txrx_peer->jitter_stats)
5067 jitter_stats = txrx_peer->jitter_stats;
5077 ts, txrx_peer->vdev, tx_desc);
5080 static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer, argument
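The delay (4864-4891) and jitter (5049-5077) updaters share a guard: the per-peer buffer (txrx_peer->delay_stats / txrx_peer->jitter_stats) is a pointer that stays NULL unless the feature was enabled for the peer, so the completion hot path pays only a NULL test when stats are off. A minimal sketch of that guard-on-use pattern, with invented field and function names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct delay_stats { uint64_t total_us; uint32_t samples; };

struct dp_txrx_peer {
	/* stays NULL unless the feature is enabled for this peer;
	 * jitter_stats follows the same convention */
	struct delay_stats *delay_stats;
};

/* Mirrors the guard at 4878: when the buffer was never allocated,
 * the update degenerates to a single branch. */
static void update_delay(struct dp_txrx_peer *peer, uint32_t delay_us)
{
	if (!peer->delay_stats)
		return;
	peer->delay_stats->total_us += delay_us;
	peer->delay_stats->samples++;
}

int main(void)
{
	struct dp_txrx_peer peer = { 0 };

	update_delay(&peer, 300);   /* no-op: feature disabled */
	peer.delay_stats = calloc(1, sizeof(*peer.delay_stats));
	if (!peer.delay_stats)
		return 1;
	update_delay(&peer, 300);   /* now recorded */
	printf("samples=%u total=%llu us\n", peer.delay_stats->samples,
	       (unsigned long long)peer.delay_stats->total_us);
	free(peer.delay_stats);
	return 0;
}
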
5194 struct dp_txrx_peer *txrx_peer, argument
5200 dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer, argument
5207 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
5215 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer) argument
5217 return txrx_peer->mpdu_retry_threshold;
5221 dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer) argument
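dp_tx_get_mpdu_retry_threshold() appears twice (5215 and 5221) because it is built in two flavors: a real accessor returning txrx_peer->mpdu_retry_threshold and a compiled-out stub. A sketch of that config-gated accessor pattern; the #ifdef guard name below is a placeholder, not the driver's actual build flag:

#include <stdint.h>
#include <stdio.h>

struct dp_txrx_peer { uint8_t mpdu_retry_threshold; };

/* Two alternatives selected at build time, as at 5215/5221. */
#ifdef CONFIG_DP_MPDU_RETRY_THRESHOLD   /* hypothetical guard name */
static uint8_t
dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
{
	return txrx_peer->mpdu_retry_threshold;
}
#else
static uint8_t
dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
{
	(void)txrx_peer;
	return 0;   /* stub: feature compiled out, threshold never hit */
}
#endif

int main(void)
{
	struct dp_txrx_peer peer = { .mpdu_retry_threshold = 12 };

	/* with the guard undefined, the stub is picked and 0 is printed */
	printf("threshold=%u\n", dp_tx_get_mpdu_retry_threshold(&peer));
	return 0;
}
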
5238 struct dp_txrx_peer *txrx_peer, uint8_t link_id) argument
5241 uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
5251 DP_PEER_EXTD_STATS_INC(txrx_peer,
5255 DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
5256 DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
5257 DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
5259 DP_PEER_EXTD_STATS_INC(txrx_peer,
5262 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
5263 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
5264 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
5267 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
5272 DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
5282 struct dp_txrx_peer *txrx_peer, uint8_t link_id) argument
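In the extended-stats block at 5238-5272 (dp_tx_update_peer_extd_stats, given the call at 5395), the _INCC variants are conditional increments: the extra argument gates the bump, e.g. tx.retries only counts when ts->transmit_cnt > 1. A sketch of that macro shape, assuming the same convention as the driver's generic conditional-increment helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct extd_tx { uint32_t stbc, ldpc, retries; };
struct link_stats { struct extd_tx tx; };
struct dp_txrx_peer { struct link_stats stats[2]; };

/* Assumed shape: _INC bumps unconditionally, _INCC only when _cond
 * holds. The real DP_PEER_EXTD_STATS_* macros add further guards. */
#define EXTD_STATS_INC(_p, _f, _d, _link) \
	((_p)->stats[(_link)]._f += (_d))
#define EXTD_STATS_INCC(_p, _f, _d, _cond, _link)          \
	do {                                               \
		if (_cond)                                 \
			EXTD_STATS_INC(_p, _f, _d, _link); \
	} while (0)

int main(void)
{
	struct dp_txrx_peer peer = { 0 };
	uint32_t transmit_cnt = 3;

	/* mirrors 5264: count a retry only when transmit_cnt > 1 */
	EXTD_STATS_INCC(&peer, tx.retries, 1, transmit_cnt > 1, 0);
	EXTD_STATS_INCC(&peer, tx.stbc, 1, false, 0);   /* no-op */
	printf("retries=%u stbc=%u\n", peer.stats[0].tx.retries,
	       peer.stats[0].tx.stbc);
	return 0;
}
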
5293 struct dp_txrx_peer *txrx_peer, argument
5300 if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
5311 txrx_peer,
5321 struct dp_txrx_peer *txrx_peer, argument
5343 struct dp_txrx_peer *txrx_peer, uint8_t ring_id, argument
5346 struct dp_pdev *pdev = txrx_peer->vdev->pdev;
5361 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
5367 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
5370 qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
5371 dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
5378 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
5381 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
5384 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
5387 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
5389 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
5392 txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
5395 dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);
5408 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
5410 DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
5413 dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);
5416 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
5419 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
5422 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
5425 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
5428 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
5431 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
5434 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
5437 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5441 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5445 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5449 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5453 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5457 DP_PEER_PER_PKT_STATS_INC(txrx_peer,
5461 DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
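The tail of the stats updater beginning at 5343 (dp_tx_update_peer_stats, judging by the call at 6164) fans each non-acked completion out into exactly one per-reason drop counter: age_out, fw_rem, fw_rem_notx, fw_rem_tx, fw_reason1-3, invalid_rr, and so on. A condensed sketch of that dispatch; the reason-code names below are placeholders for the HAL/TQM release codes the real code decodes from the completion descriptor:

#include <stdint.h>
#include <stdio.h>

/* Placeholder reason codes standing in for the HAL/TQM release values. */
enum tx_rel_reason {
	REL_FRAME_ACKED,
	REL_REM_CMD_AGED,
	REL_REM_CMD_REM,
	REL_REM_CMD_NOTX,
	REL_REM_CMD_TX,
	REL_FW_REASON1,
	REL_INVALID_RR,
};

struct tx_dropped {
	uint32_t age_out, fw_rem, fw_rem_notx, fw_rem_tx,
		 fw_reason1, invalid_rr;
};

/* Each failed completion lands in exactly one per-reason counter. */
static void count_drop(struct tx_dropped *d, enum tx_rel_reason r)
{
	switch (r) {
	case REL_REM_CMD_AGED:  d->age_out++;     break;
	case REL_REM_CMD_REM:   d->fw_rem++;      break;
	case REL_REM_CMD_NOTX:  d->fw_rem_notx++; break;
	case REL_REM_CMD_TX:    d->fw_rem_tx++;   break;
	case REL_FW_REASON1:    d->fw_reason1++;  break;
	case REL_INVALID_RR:    d->invalid_rr++;  break;
	default:                                  break;
	}
}

int main(void)
{
	struct tx_dropped d = { 0 };

	count_drop(&d, REL_REM_CMD_AGED);
	count_drop(&d, REL_INVALID_RR);
	printf("aged=%u invalid_rr=%u\n", d.age_out, d.invalid_rr);
	return 0;
}
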
5568 struct dp_txrx_peer *txrx_peer, argument
5590 txrx_peer->peer_id,
5606 qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].per_pkt_stats.tx.avg_sojourn_msdu[tid],
5611 txrx_peer->stats[link_id].
5624 struct dp_txrx_peer *txrx_peer, argument
5647 struct dp_txrx_peer *txrx_peer) argument
5671 if (txrx_peer)
5672 peer_id = txrx_peer->peer_id;
5682 txrx_peer, ts,
5972 dp_update_mcast_stats(struct dp_txrx_peer *txrx_peer, uint8_t link_id, argument
5976 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
5981 dp_update_mcast_stats(struct dp_txrx_peer *txrx_peer, uint8_t link_id, argument
6001 struct dp_txrx_peer *txrx_peer; local
6019 txrx_peer = dp_get_txrx_peer(peer);
6020 if (qdf_likely(txrx_peer)) {
6023 txrx_peer,
6024 txrx_peer->vdev);
6025 qdf_nbuf_tx_set_band(nbuf, txrx_peer->ll_band[link_id]);
6041 struct dp_txrx_peer *txrx_peer, argument
6098 if (!txrx_peer) {
6112 vdev = txrx_peer->vdev;
6114 link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);
6116 dp_tx_set_nbuf_band(nbuf, txrx_peer, link_id);
6133 if (qdf_unlikely(txrx_peer->bss_peer &&
6136 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
6139 if (txrx_peer->vdev->tx_encap_type ==
6142 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
6148 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
6151 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
6153 if (qdf_unlikely(txrx_peer->in_twt)) {
6154 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
6160 dp_update_mcast_stats(txrx_peer, link_id, length, nbuf);
6164 dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
6165 dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
6166 dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
6167 dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
6170 dp_tx_latency_stats_update(soc, txrx_peer, tx_desc, ts, link_id);
6174 dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
6192 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, argument
6196 if (update || (!txrx_peer->hw_txrx_stats_en)) {
6197 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6200 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6204 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, argument
6208 if (!txrx_peer->hw_txrx_stats_en) {
6209 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6212 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
6217 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer, argument
6221 DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
6224 DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
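dp_tx_update_peer_basic_stats() shows up three times (6192, 6204, 6217) because it is built in three config-dependent flavors: one honoring a caller-supplied 'update' override, one skipping peers whose stats the hardware already accounts (hw_txrx_stats_en), and one that always counts on the host. A sketch of that selection; the guard names are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dp_txrx_peer {
	bool hw_txrx_stats_en;   /* HW offload already counts this peer */
	uint32_t comp_pkt_num;
	uint64_t comp_pkt_bytes;
	uint32_t tx_failed;
};

static void account(struct dp_txrx_peer *p, uint32_t len, uint8_t status)
{
	p->comp_pkt_num++;
	p->comp_pkt_bytes += len;
	if (status != 0)   /* non-zero standing in for a non-OK FW status */
		p->tx_failed++;
}

/* Hypothetical guards; the real flags live in the driver's Kbuild. */
#if defined(CFG_FORCE_UPDATE_VARIANT)          /* cf. variant at 6192 */
static void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *p,
					  uint32_t len, uint8_t status,
					  bool update)
{
	/* honor a caller override, else skip peers HW already counts */
	if (update || !p->hw_txrx_stats_en)
		account(p, len, status);
}
#elif defined(CFG_HW_GATED_VARIANT)            /* cf. variant at 6204 */
static void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *p,
					  uint32_t len, uint8_t status,
					  bool update)
{
	(void)update;
	if (!p->hw_txrx_stats_en)
		account(p, len, status);
}
#else                                          /* cf. variant at 6217 */
static void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *p,
					  uint32_t len, uint8_t status,
					  bool update)
{
	(void)update;
	account(p, len, status);
}
#endif

int main(void)
{
	struct dp_txrx_peer peer = { .hw_txrx_stats_en = true };

	dp_tx_update_peer_basic_stats(&peer, 1500, 0, false);
	printf("comp=%u failed=%u\n", peer.comp_pkt_num, peer.tx_failed);
	return 0;
}
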
6371 struct dp_txrx_peer *txrx_peer, argument
6379 if (qdf_likely(txrx_peer)) {
6384 vdev = txrx_peer->vdev;
6387 txrx_peer,
6392 txrx_peer,
6396 dp_tx_update_peer_basic_stats(txrx_peer, desc->length,
6404 struct dp_txrx_peer *txrx_peer, argument
6433 struct dp_txrx_peer *txrx_peer = NULL; local
6447 if (txrx_peer)
6451 txrx_peer =
6464 dp_tx_update_ppeds_tx_comp_stats(soc, txrx_peer, &ts,
6489 if (qdf_likely(txrx_peer))
6490 dp_tx_update_peer_basic_stats(txrx_peer,
6511 dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
6514 dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
6520 if (txrx_peer)
7935 struct dp_txrx_peer *txrx_peer = NULL; local
7955 TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
7957 if (*vlan_id == txrx_peer->vlan_id) {
7977 if (*vlan_id == peer->txrx_peer->vlan_id) {
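The final matches (7935-7977) walk vdev->mpass_peer_list, a TAILQ keyed by vlan_id, to resolve the multipass peer for a frame. A self-contained sketch of that lookup using the BSD sys/queue.h macros that the qdf wrappers mirror; note the real code holds the vdev's multipass peer lock across the walk:

#include <stdint.h>
#include <stdio.h>
#include <sys/queue.h>   /* same TAILQ macro family qdf wraps */

struct dp_txrx_peer {
	uint16_t vlan_id;
	TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
};

TAILQ_HEAD(mpass_head, dp_txrx_peer);

/* Mirrors the lookup at 7955-7957: walk the multipass peer list and
 * match on vlan_id; NULL means no peer claims that VLAN. */
static struct dp_txrx_peer *
find_mpass_peer(struct mpass_head *list, uint16_t vlan_id)
{
	struct dp_txrx_peer *p;

	TAILQ_FOREACH(p, list, mpass_peer_list_elem) {
		if (p->vlan_id == vlan_id)
			return p;
	}
	return NULL;
}

int main(void)
{
	struct mpass_head head = TAILQ_HEAD_INITIALIZER(head);
	struct dp_txrx_peer a = { .vlan_id = 10 }, b = { .vlan_id = 20 };

	TAILQ_INSERT_TAIL(&head, &a, mpass_peer_list_elem);
	TAILQ_INSERT_TAIL(&head, &b, mpass_peer_list_elem);
	printf("vlan 20 found: %s\n",
	       find_mpass_peer(&head, 20) ? "yes" : "no");
	return 0;
}
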