Lines matching refs:nbuf (all references to the nbuf symbol, listed by source line number with the enclosing function and usage tag)

145 	qdf_nbuf_t nbuf;  member
238 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding) in dp_rx_set_hdr_pad() argument
240 QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding; in dp_rx_set_hdr_pad()
244 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding) in dp_rx_set_hdr_pad() argument
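The paired definitions at 238 and 244 follow this header's usual pattern: a real inline setter when the L3-padding feature is compiled in, and an empty stub otherwise. A minimal sketch of that shape, assuming a placeholder config guard (only the macro and signature come from the listing):

    #ifdef CONFIG_RX_L3_HDR_PAD     /* placeholder guard name */
    static inline
    void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
    {
        /* stash the L3 header padding in the nbuf control block */
        QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
    }
    #else
    static inline
    void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
    {
        /* no-op when the feature is compiled out */
    }
    #endif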
261 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask) in dp_rx_is_special_frame() argument
264 qdf_nbuf_is_ipv4_arp_pkt(nbuf)) || in dp_rx_is_special_frame()
266 qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) || in dp_rx_is_special_frame()
268 qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) || in dp_rx_is_special_frame()
270 qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)) || in dp_rx_is_special_frame()
272 qdf_nbuf_data_is_dns_query(nbuf)) || in dp_rx_is_special_frame()
274 qdf_nbuf_data_is_dns_response(nbuf))) in dp_rx_is_special_frame()
296 qdf_nbuf_t nbuf, uint32_t frame_mask,
300 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask) in dp_rx_is_special_frame() argument
307 qdf_nbuf_t nbuf, uint32_t frame_mask, in dp_rx_deliver_special_frame() argument
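Lines 261-274 pair each bit in frame_mask with one qdf_nbuf classifier, so a frame is "special" if any enabled class matches. A sketch of that test, assuming FRAME_MASK_* flag names that the listing does not show:

    static inline
    bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
    {
        if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
             qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
            ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
             qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
            ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
             qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
            ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
             qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)) ||
            ((frame_mask & FRAME_MASK_DNS_QUERY) &&
             qdf_nbuf_data_is_dns_query(nbuf)) ||
            ((frame_mask & FRAME_MASK_DNS_RESP) &&
             qdf_nbuf_data_is_dns_response(nbuf)))
            return true;

        return false;
    }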
332 qdf_nbuf_t nbuf) in dp_rx_data_is_specific() argument
334 if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf))) in dp_rx_data_is_specific()
346 if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) in dp_rx_data_is_specific()
350 if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)) in dp_rx_data_is_specific()
361 qdf_nbuf_t nbuf) in dp_rx_data_is_specific() argument
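Lines 332-350 show the classifier short-circuiting on multicast/broadcast destinations before checking for DHCP. A sketch of the visible logic (the leading parameters elided at line 332 and the exact return polarity are assumptions):

    static inline
    bool dp_rx_data_is_specific(qdf_nbuf_t nbuf)
    {
        /* multicast/broadcast destinations are never "specific" */
        if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
            return false;

        /* DHCP control traffic is treated as specific */
        if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
            return true;
        if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
            return true;

        return false;
    }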
376 qdf_nbuf_t nbuf, uint8_t link_id) in dp_rx_check_ndi_mdns_fwding() argument
379 qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) { in dp_rx_check_ndi_mdns_fwding()
390 qdf_nbuf_t nbuf, uint8_t link_id) in dp_rx_check_ndi_mdns_fwding() argument
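Lines 376-390 gate intra-BSS forwarding of mDNS on NAN data interfaces. A sketch, assuming the opmode check and a false-means-no-forward convention:

    static inline
    bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
                                     qdf_nbuf_t nbuf, uint8_t link_id)
    {
        if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
            qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
            /* suppress forwarding of IPv6 mDNS on an NDI (assumed) */
            return false;
        }
        return true;
    }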
469 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) in dp_set_rx_queue() argument
471 qdf_nbuf_record_rx_queue(nbuf, queue_id); in dp_set_rx_queue()
475 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) in dp_set_rx_queue() argument
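Lines 469-475 are another compiled-in/stubbed-out pair: when RX queue recording is enabled, the queue ID is stored in the nbuf for later debugging. A sketch with a placeholder guard:

    #ifdef CONFIG_RX_QUEUE_RECORD   /* placeholder guard name */
    static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
    {
        qdf_nbuf_record_rx_queue(nbuf, queue_id);
    }
    #else
    static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
    {
    }
    #endif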
985 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);
1121 info->prev_nbuf = rx_desc->nbuf; in dp_rx_desc_update_dbg_info()
1164 new->nbuf = NULL; in __dp_rx_add_to_free_desc_list()
1183 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1384 qdf_nbuf_t nbuf) in dp_nbuf_dst_addr_is_mld_addr() argument
1389 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) + in dp_nbuf_dst_addr_is_mld_addr()
1394 qdf_nbuf_t nbuf) in dp_nbuf_dst_addr_is_mld_addr() argument
1401 qdf_nbuf_t nbuf) in dp_nbuf_dst_addr_is_self_addr() argument
1404 (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) + in dp_nbuf_dst_addr_is_self_addr()
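Lines 1384-1404 compare the destination address at the head of the frame data against the vdev's MLD or self MAC; the trailing "+" at 1389/1404 indicates an offset applied to the cast pointer. A sketch of the self-address variant, where the offset macro and the vdev field name are assumptions:

    static inline bool
    dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
    {
        /* DA sits at a fixed offset from the start of the frame data;
         * QDF_NBUF_DEST_MAC_OFFSET and vdev->mac_addr are assumed names */
        struct qdf_mac_addr *da =
            (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
            QDF_NBUF_DEST_MAC_OFFSET;

        return qdf_is_macaddr_equal(da,
                (struct qdf_mac_addr *)&vdev->mac_addr.raw);
    }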
1421 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf) in dp_rx_intrabss_eapol_drop_check() argument
1423 if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) && in dp_rx_intrabss_eapol_drop_check()
1425 nbuf) || in dp_rx_intrabss_eapol_drop_check()
1427 nbuf)))) { in dp_rx_intrabss_eapol_drop_check()
1428 qdf_nbuf_free(nbuf); in dp_rx_intrabss_eapol_drop_check()
1440 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf) in dp_rx_intrabss_eapol_drop_check() argument
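Lines 1421-1428 drop EAPOL frames in the intra-BSS path unless the DA is the vdev's own or MLD address; the continuation lines at 1425/1427 show the two address checks OR-ed inside a negation. A reconstruction under that reading (the leading parameters are assumptions):

    static inline bool
    dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
                                    struct dp_txrx_peer *ta_txrx_peer,
                                    uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
    {
        if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
                         !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
                                                         nbuf) ||
                           dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
                                                        nbuf)))) {
            qdf_nbuf_free(nbuf);
            return true;    /* frame consumed, caller must not forward */
        }

        return false;
    }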
1459 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1478 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1516 struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);
1527 qdf_nbuf_t nbuf, in dp_rx_wds_srcport_learn() argument
1534 struct dp_peer *ta_peer, qdf_nbuf_t nbuf, in dp_rx_ipa_wds_srcport_learn() argument
1549 rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id, in dp_rx_desc_dump()
1566 qdf_nbuf_t nbuf) in check_qwrap_multicast_loopback() argument
1570 uint8_t *data = qdf_nbuf_data(nbuf); in check_qwrap_multicast_loopback()
1594 qdf_nbuf_t nbuf) in check_qwrap_multicast_loopback() argument
1625 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, in dp_rx_update_protocol_tag() argument
1645 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) in dp_rx_err_cce_drop() argument
1666 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats) in dp_rx_update_flow_tag() argument
1831 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1847 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1861 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1873 qdf_nbuf_t nbuf);
1878 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) in dp_rx_set_reuse_nbuf() argument
1880 rx_desc->reuse_nbuf = nbuf; in dp_rx_set_reuse_nbuf()
1905 new->nbuf = NULL; in __dp_rx_add_to_free_desc_list_reuse()
1915 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf) in dp_rx_set_reuse_nbuf() argument
1958 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf; in dp_rx_desc_prep()
1960 rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf); in dp_rx_desc_prep()
1961 dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf); in dp_rx_desc_prep()
2005 return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)); in dp_rx_desc_paddr_sanity_check()
2018 rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf; in dp_rx_desc_prep()
2019 dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf); in dp_rx_desc_prep()
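Lines 1958-2019 show two builds of the descriptor-prep helper; both attach the new nbuf to the SW descriptor and remember it for reuse, and one additionally caches the data pointer for the paddr sanity check at line 2005. A reconstruction of the richer variant (the parameter type is inferred from the nbuf_frag_info_t identifier; other bookkeeping is elided):

    static inline
    void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
                         struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
    {
        rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
        /* cached so later sanity checks can spot a stale descriptor */
        rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
        dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
    }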
2069 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf, in dp_rx_multipass_process() argument
2089 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
2149 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf); \
2152 DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf); \
2153 if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) { \
2164 DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
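The two branches at 2149-2164 append either the completed nbuf or a chain fragment via DP_RX_LIST_APPEND. The helper typically has this shape (a sketch; the real macro may also maintain an element count):

    /* Append elem to the (head, tail) singly linked nbuf list. */
    #define DP_RX_LIST_APPEND(head, tail, elem)                \
        do {                                                   \
            if (!(head))                                       \
                (head) = (elem);                               \
            else                                               \
                qdf_nbuf_set_next((tail), (elem));             \
            (tail) = (elem);                                   \
            qdf_nbuf_set_next((tail), NULL);                   \
        } while (0)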
2183 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2194 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, in dp_rx_deliver_to_pkt_capture_no_peer() argument
2216 qdf_nbuf_t nbuf);
2221 qdf_nbuf_t nbuf) in dp_rx_mcast_echo_check() argument
2262 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2281 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
2292 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
2295 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) in dp_rx_is_raw_frame_dropped() argument
2309 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2312 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) in dp_rx_update_stats() argument
2331 qdf_nbuf_t nbuf, in dp_rx_cksum_offload() argument
2343 if (qdf_nbuf_is_ipv4_pkt(nbuf)) { in dp_rx_cksum_offload()
2346 if (qdf_nbuf_is_ipv4_udp_pkt(nbuf) || in dp_rx_cksum_offload()
2347 qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) { in dp_rx_cksum_offload()
2360 } else if (qdf_nbuf_is_ipv6_udp_pkt(nbuf) || in dp_rx_cksum_offload()
2361 qdf_nbuf_is_ipv6_tcp_pkt(nbuf)) { in dp_rx_cksum_offload()
2371 qdf_nbuf_set_rx_cksum(nbuf, &cksum); in dp_rx_cksum_offload()
2376 qdf_nbuf_t nbuf, in dp_rx_cksum_offload() argument
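Lines 2331-2371 fill the nbuf checksum record from the hardware verification result so the network stack can skip software checksumming of TCP/UDP over IPv4/IPv6. A sketch, with the TLV parse replaced by error flags passed in, since the listing does not show how the result is read from rx_tlv_hdr:

    static inline void
    dp_rx_cksum_offload(qdf_nbuf_t nbuf, bool ip_csum_err,
                        bool tcp_udp_csum_err)
    {
        qdf_nbuf_rx_cksum_t cksum = {0};

        if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
            if (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
                qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
                if (!ip_csum_err && !tcp_udp_csum_err)
                    cksum.l4_result =
                        QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
            }
        } else if (qdf_nbuf_is_ipv6_udp_pkt(nbuf) ||
                   qdf_nbuf_is_ipv6_tcp_pkt(nbuf)) {
            if (!tcp_udp_csum_err)
                cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
        }

        qdf_nbuf_set_rx_cksum(nbuf, &cksum);
    }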
2431 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2451 qdf_nbuf_t nbuf) in dp_rx_nbuf_set_link_id_from_tlv() argument
2457 soc->arch_ops.dp_rx_peer_set_link_id(nbuf, peer_metadata); in dp_rx_nbuf_set_link_id_from_tlv()
2469 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer, in dp_rx_set_nbuf_band() argument
2472 qdf_nbuf_rx_set_band(nbuf, txrx_peer->band[link_id]); in dp_rx_set_nbuf_band()
2477 qdf_nbuf_t nbuf) in dp_rx_nbuf_set_link_id_from_tlv() argument
2482 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer, in dp_rx_set_nbuf_band() argument
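Lines 2469-2472 give the band setter almost verbatim; reassembled:

    static inline void
    dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
                        uint8_t link_id)
    {
        /* stamp the nbuf with the band the peer uses on this link */
        qdf_nbuf_rx_set_band(nbuf, txrx_peer->band[link_id]);
    }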
2614 qdf_nbuf_t nbuf, in dp_rx_nbuf_sync_no_dsb() argument
2617 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, in dp_rx_nbuf_sync_no_dsb()
2618 (void *)(nbuf->data + buf_size)); in dp_rx_nbuf_sync_no_dsb()
2620 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); in dp_rx_nbuf_sync_no_dsb()
2626 qdf_nbuf_t nbuf, in dp_rx_nbuf_sync_no_dsb() argument
2629 if (nbuf->recycled_for_ds) in dp_rx_nbuf_sync_no_dsb()
2630 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); in dp_rx_nbuf_sync_no_dsb()
2632 if (unlikely(!nbuf->fast_recycled)) { in dp_rx_nbuf_sync_no_dsb()
2633 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, in dp_rx_nbuf_sync_no_dsb()
2634 (void *)(nbuf->data + buf_size)); in dp_rx_nbuf_sync_no_dsb()
2638 nbuf->fast_recycled = 0; in dp_rx_nbuf_sync_no_dsb()
2640 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); in dp_rx_nbuf_sync_no_dsb()
2646 qdf_nbuf_t nbuf, in dp_rx_nbuf_sync() argument
2649 qdf_nbuf_dma_inv_range((void *)nbuf->data, in dp_rx_nbuf_sync()
2650 (void *)(nbuf->data + buf_size)); in dp_rx_nbuf_sync()
2652 return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data); in dp_rx_nbuf_sync()
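Lines 2626-2640 reassemble into the fast-recycle variant of the no-DSB sync: skip the cache invalidate entirely for buffers recycled for direct switch, invalidate the payload range only for buffers that were not fast-recycled, then return the payload's physical address (the leading soc parameter is assumed from context):

    static inline qdf_dma_addr_t
    dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
                           uint32_t buf_size)
    {
        if (nbuf->recycled_for_ds)
            return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);

        if (unlikely(!nbuf->fast_recycled))
            qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
                                          (void *)(nbuf->data + buf_size));

        nbuf->fast_recycled = 0;
        return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
    }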
2662 qdf_nbuf_t nbuf; in dp_rx_nbuf_unmap() local
2665 nbuf = rx_desc->nbuf; in dp_rx_nbuf_unmap()
2667 qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data, in dp_rx_nbuf_unmap()
2668 (void *)(nbuf->data + rx_desc_pool->buf_size)); in dp_rx_nbuf_unmap()
2674 qdf_nbuf_t nbuf) in dp_rx_nbuf_unmap_pool() argument
2676 qdf_nbuf_dma_inv_range((void *)nbuf->data, in dp_rx_nbuf_unmap_pool()
2677 (void *)(nbuf->data + rx_desc_pool->buf_size)); in dp_rx_nbuf_unmap_pool()
2691 qdf_nbuf_t nbuf) in dp_rx_nbuf_unmap_pool() argument
2712 void dp_rx_nbuf_free(qdf_nbuf_t nbuf) in dp_rx_nbuf_free() argument
2714 qdf_nbuf_free_simple(nbuf); in dp_rx_nbuf_free()
2757 qdf_nbuf_t nbuf, in dp_rx_nbuf_sync_no_dsb() argument
2765 qdf_nbuf_t nbuf, in dp_rx_nbuf_sync() argument
2782 QDF_NBUF_CB_PADDR(rx_desc->nbuf), in dp_rx_nbuf_unmap()
2785 dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, in dp_rx_nbuf_unmap()
2788 qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, in dp_rx_nbuf_unmap()
2799 qdf_nbuf_t nbuf) in dp_rx_nbuf_unmap_pool() argument
2801 dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf), in dp_rx_nbuf_unmap_pool()
2803 dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, in dp_rx_nbuf_unmap_pool()
2806 qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE, in dp_rx_nbuf_unmap_pool()
2829 void dp_rx_nbuf_free(qdf_nbuf_t nbuf) in dp_rx_nbuf_free() argument
2831 qdf_nbuf_free(nbuf); in dp_rx_nbuf_free()
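Lines 2782-2806 are the fully-mapped build, where "unmap" really releases SMMU and DMA mappings and "free" takes the regular path; contrast lines 2662-2714, where unmap is only a cache invalidate and free uses the simple fast path. A sketch of the pool variant (the trailing arguments of the two SMMU helpers are not shown in the listing and are assumed):

    static inline void
    dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool,
                          qdf_nbuf_t nbuf)
    {
        dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
                            rx_desc_pool->buf_size);
        dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
                                          false);   /* trailing args assumed */
        qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
                                     rx_desc_pool->buf_size);
    }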
2873 qdf_nbuf_t nbuf, in dp_rx_get_txrx_peer_and_vdev() argument
2890 nbuf->next = NULL; in dp_rx_get_txrx_peer_and_vdev()
2891 dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf, in dp_rx_get_txrx_peer_and_vdev()
2894 dp_rx_deliver_to_stack_no_peer(soc, nbuf); in dp_rx_get_txrx_peer_and_vdev()
2900 qdf_nbuf_free(nbuf); in dp_rx_get_txrx_peer_and_vdev()
2931 qdf_nbuf_t nbuf, next; in dp_rx_nbuf_list_deliver() local
2943 nbuf = deliver_list_head; in dp_rx_nbuf_list_deliver()
2944 while (nbuf) { in dp_rx_nbuf_list_deliver()
2945 next = nbuf->next; in dp_rx_nbuf_list_deliver()
2946 nbuf->next = NULL; in dp_rx_nbuf_list_deliver()
2947 dp_rx_deliver_to_stack_no_peer(soc, nbuf); in dp_rx_nbuf_list_deliver()
2948 nbuf = next; in dp_rx_nbuf_list_deliver()
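Lines 2943-2948 reassemble into the usual detach-and-deliver walk over a singly linked nbuf list; each element is unlinked before delivery so the stack never sees the chain (the function wrapper here is a sketch around the listed body):

    static void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
                                        qdf_nbuf_t deliver_list_head)
    {
        qdf_nbuf_t nbuf, next;

        nbuf = deliver_list_head;
        while (nbuf) {
            next = nbuf->next;
            nbuf->next = NULL;      /* detach before handing up */
            dp_rx_deliver_to_stack_no_peer(soc, nbuf);
            nbuf = next;
        }
    }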
2973 qdf_nbuf_t nbuf = NULL; in dp_rx_nbuf_list_dup_deliver() local
2977 nbuf = ori_list_head; in dp_rx_nbuf_list_dup_deliver()
2981 while (nbuf) { in dp_rx_nbuf_list_dup_deliver()
2982 new_skb = qdf_nbuf_copy(nbuf); in dp_rx_nbuf_list_dup_deliver()
2990 nbuf = qdf_nbuf_next(nbuf); in dp_rx_nbuf_list_dup_deliver()
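Lines 2973-2990 walk the original list and copy each element. A sketch of the loop with the copies chained onto a new delivery list; the destination list variables and the copy-failure handling are assumptions:

    static void dp_rx_nbuf_list_dup_deliver(qdf_nbuf_t ori_list_head)
    {
        qdf_nbuf_t nbuf, new_skb;
        qdf_nbuf_t new_list_head = NULL, new_list_tail = NULL; /* assumed */

        nbuf = ori_list_head;
        while (nbuf) {
            new_skb = qdf_nbuf_copy(nbuf);
            if (new_skb)    /* skip the element on copy failure (assumed) */
                DP_RX_LIST_APPEND(new_list_head, new_list_tail, new_skb);
            nbuf = qdf_nbuf_next(nbuf);
        }
        /* the duplicated list is then delivered (elided here) */
    }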
3045 qdf_nbuf_t nbuf);
3084 qdf_nbuf_t nbuf);
3112 qdf_nbuf_t nbuf,
3126 qdf_nbuf_t nbuf,
3234 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
3247 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
3267 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
3540 qdf_nbuf_t nbuf);
3545 qdf_nbuf_t nbuf) in dp_rx_mark_first_packet_after_wow_wakeup() argument
3577 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf, in dp_rx_get_stats_arr_idx_from_link_id() argument
3580 return QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf); in dp_rx_get_stats_arr_idx_from_link_id()
3584 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf, in dp_rx_get_stats_arr_idx_from_link_id() argument
3589 link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1); in dp_rx_get_stats_arr_idx_from_link_id()
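Lines 3577-3589 show two builds of the stats-array index helper: one reads the logical link ID straight from the nbuf control block, the other derives it from the HW link ID with the +1 bias visible at line 3589. A sketch of both alternate builds (the second parameter and the bounds handling are assumptions):

    static inline uint8_t
    dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
                                         struct dp_txrx_peer *txrx_peer)
    {
        return QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf);
    }

    /* alternate build: derive the index from the HW link ID */
    static inline uint8_t
    dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
                                         struct dp_txrx_peer *txrx_peer)
    {
        uint8_t link_id;

        link_id = QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1;
        if (link_id > DP_MAX_MLO_LINKS)    /* clamp is an assumption */
            link_id = 0;

        return link_id;
    }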