Lines matching refs: nbuf
53 qdf_nbuf_t nbuf, uint16_t sa_peer_id);
54 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
152 qdf_nbuf_t nbuf; member
431 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
445 qdf_nbuf_t nbuf,
573 qdf_nbuf_t nbuf);
590 uint8_t vdev_id, qdf_nbuf_t nbuf);
607 qdf_nbuf_t nbuf,
626 uint8_t vdev_id, qdf_nbuf_t nbuf,
642 qdf_nbuf_t nbuf);
656 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
668 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
683 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
686 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
739 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
742 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
800 qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf); in dp_tx_prefetch_hw_sw_nbuf_desc()
801 qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64); in dp_tx_prefetch_hw_sw_nbuf_desc()
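A minimal sketch of the prefetch pattern behind dp_tx_prefetch_hw_sw_nbuf_desc(), reconstructed only from the two qdf_prefetch() references above. The simplified descriptor struct, the helper name, and the assumption that the usual QDF headers are available are all illustrative, not the driver's real layout:

    /* Sketch only: warm the first two 64-byte cachelines at the next
     * descriptor's nbuf pointer while the current descriptor is still
     * being processed, so the follow-up access does not stall. */
    struct tx_desc_sketch {
    	qdf_nbuf_t nbuf;	/* buffer attached to the SW descriptor */
    };

    static inline void
    tx_prefetch_next_nbuf_sketch(struct tx_desc_sketch *next_desc)
    {
    	qdf_prefetch((uint8_t *)next_desc->nbuf);
    	qdf_prefetch((uint8_t *)next_desc->nbuf + 64);
    }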
850 qdf_nbuf_t nbuf, in dp_tx_multipass_process() argument
872 qdf_nbuf_t nbuf,
939 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) in dp_tx_get_queue() argument
951 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) in dp_tx_get_queue() argument
964 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) in dp_tx_get_queue() argument
975 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) in dp_tx_get_queue() argument
1005 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) in dp_tx_get_queue() argument
1013 queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) % in dp_tx_get_queue()
1018 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) in dp_tx_get_queue() argument
1022 queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) % in dp_tx_get_queue()
1028 qdf_nbuf_t nbuf, struct dp_tx_queue *queue) in dp_tx_get_queue() argument
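The dp_tx_get_queue() references above exist in several compile-time variants; the ones at source lines 1013 and 1022 derive the TX ring from the nbuf's OS queue mapping. A hedged sketch of that idea follows; the ring count macro and the sketch struct are assumptions, the modulo mapping itself is what the references show:

    #define DP_TX_RING_COUNT_SKETCH 4	/* assumed number of TCL TX rings */

    struct dp_tx_queue_sketch {
    	uint8_t ring_id;
    };

    static inline void
    dp_tx_get_queue_sketch(qdf_nbuf_t nbuf, struct dp_tx_queue_sketch *queue)
    {
    	/* Map the OS-assigned queue of this nbuf onto one of the
    	 * available hardware TX rings; the modulo keeps it in range. */
    	queue->ring_id = qdf_nbuf_get_queue_mapping(nbuf) %
    			 DP_TX_RING_COUNT_SKETCH;
    }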
1677 static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf) in dp_sawf_tag_valid_get() argument
1736 qdf_nbuf_t nbuf);
1746 #define dp_pkt_add_timestamp(vdev, index, time, nbuf) argument
1797 static inline bool is_spl_packet(qdf_nbuf_t nbuf) in is_spl_packet() argument
1799 if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) in is_spl_packet()
1817 dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf) in dp_tx_limit_check() argument
1839 is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf) in is_dp_spl_tx_limit_reached() argument
1844 if (is_spl_packet(nbuf)) { in is_dp_spl_tx_limit_reached()
1878 dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf) in dp_tx_limit_check() argument
1882 uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf); in dp_tx_limit_check()
1885 if (is_dp_spl_tx_limit_reached(vdev, nbuf)) { in dp_tx_limit_check()
1895 if (is_dp_spl_tx_limit_reached(vdev, nbuf)) { in dp_tx_limit_check()
2062 dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf) in dp_tx_limit_check() argument
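The is_spl_packet() / is_dp_spl_tx_limit_reached() / dp_tx_limit_check() references above suggest a descriptor-limit flow in which EAPOL frames are classified as special packets and checked against their own limit. The following is a hedged reconstruction of that flow only; the limit parameters and counters are illustrative assumptions, not the driver's actual fields:

    static inline bool is_spl_packet_sketch(qdf_nbuf_t nbuf)
    {
    	/* EAPOL must not be starved when the TX queue is full,
    	 * otherwise key exchange (and the connection) can fail. */
    	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
    		return true;
    	return false;
    }

    static inline bool
    dp_tx_limit_check_sketch(uint32_t num_tx_outstanding,
    			 uint32_t regular_limit,
    			 uint32_t special_limit,
    			 qdf_nbuf_t nbuf)
    {
    	/* Special packets may use descriptors beyond the regular
    	 * limit, up to a higher special-packet limit. */
    	uint32_t limit = is_spl_packet_sketch(nbuf) ? special_limit
    						    : regular_limit;

    	return num_tx_outstanding >= limit;	/* true => reject the frame */
    }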
2161 dp_tx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer, in dp_tx_set_nbuf_band() argument
2164 qdf_nbuf_tx_set_band(nbuf, txrx_peer->band[link_id]); in dp_tx_set_nbuf_band()
2168 dp_tx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer, in dp_tx_set_nbuf_band() argument
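The two dp_tx_set_nbuf_band() definitions referenced above (source lines 2161 and 2168) point to a feature-gated helper: one variant stamps the nbuf with the peer's band for the given link, the other is a stub. A hedged sketch of that pair, with the config macro name assumed for illustration:

    #ifdef WLAN_BAND_TAGGING_SKETCH	/* assumed compile-time switch */
    static inline void
    dp_tx_set_nbuf_band_sketch(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
    			   uint8_t link_id)
    {
    	/* Record the transmit band of this peer/link in the nbuf so
    	 * later stages can account or steer traffic per band. */
    	qdf_nbuf_tx_set_band(nbuf, txrx_peer->band[link_id]);
    }
    #else
    static inline void
    dp_tx_set_nbuf_band_sketch(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
    			   uint8_t link_id)
    {
    	/* Band tagging not compiled in: nothing to do. */
    }
    #endif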