
Searched refs:skb (Results 1 – 25 of 423) sorted by relevance


/linux-4.19.296/include/linux/
skbuff.h
486 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
705 void (*destructor)(struct sk_buff *skb);
871 static inline bool skb_pfmemalloc(const struct sk_buff *skb) in skb_pfmemalloc() argument
873 return unlikely(skb->pfmemalloc); in skb_pfmemalloc()
890 static inline struct dst_entry *skb_dst(const struct sk_buff *skb) in skb_dst() argument
895 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && in skb_dst()
898 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); in skb_dst()
909 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set() argument
911 skb->_skb_refdst = (unsigned long)dst; in skb_dst_set()
924 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) in skb_dst_set_noref() argument
[all …]
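The dst helpers matched above pair up in a fixed pattern. A minimal sketch, not taken from the tree, assuming 4.19 kernel headers; the function name is illustrative:

	#include <linux/skbuff.h>
	#include <net/dst.h>

	/* skb_dst_set() consumes the caller's reference on @dst, skb_dst() reads
	 * it back, and skb_dst_drop() (or freeing the skb) releases it. */
	static void example_attach_route(struct sk_buff *skb, struct dst_entry *dst)
	{
		skb_dst_set(skb, dst);

		if (skb_dst(skb))
			pr_debug("dst mtu %u\n", dst_mtu(skb_dst(skb)));

		skb_dst_drop(skb);
	}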
if_vlan.h
63 static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) in vlan_eth_hdr() argument
65 return (struct vlan_ethhdr *)skb_mac_header(skb); in vlan_eth_hdr()
216 extern bool vlan_do_receive(struct sk_buff **skb);
265 static inline bool vlan_do_receive(struct sk_buff **skb) in vlan_do_receive() argument
340 static inline int __vlan_insert_inner_tag(struct sk_buff *skb, in __vlan_insert_inner_tag() argument
346 if (skb_cow_head(skb, VLAN_HLEN) < 0) in __vlan_insert_inner_tag()
349 skb_push(skb, VLAN_HLEN); in __vlan_insert_inner_tag()
353 memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); in __vlan_insert_inner_tag()
354 skb->mac_header -= VLAN_HLEN; in __vlan_insert_inner_tag()
356 veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); in __vlan_insert_inner_tag()
[all …]
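The __vlan_insert_inner_tag() logic matched above (headroom check, skb_push(), mac header move) is the same dance the exported skb_vlan_push() helper performs for callers. A hedged sketch, assuming 4.19 headers; the wrapper name is illustrative:

	#include <linux/skbuff.h>
	#include <linux/if_vlan.h>

	/* Returns 0 on success; may reallocate the skb head to gain VLAN_HLEN
	 * bytes of headroom, just as skb_cow_head() does in the snippet above. */
	static int example_tag_frame(struct sk_buff *skb, u16 vid)
	{
		return skb_vlan_push(skb, htons(ETH_P_8021Q), vid);
	}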
netfilter_bridge.h
10 int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
12 static inline void br_drop_fake_rtable(struct sk_buff *skb) in br_drop_fake_rtable() argument
14 struct dst_entry *dst = skb_dst(skb); in br_drop_fake_rtable()
17 skb_dst_drop(skb); in br_drop_fake_rtable()
20 static inline int nf_bridge_get_physinif(const struct sk_buff *skb) in nf_bridge_get_physinif() argument
24 if (skb->nf_bridge == NULL) in nf_bridge_get_physinif()
27 nf_bridge = skb->nf_bridge; in nf_bridge_get_physinif()
31 static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) in nf_bridge_get_physoutif() argument
35 if (skb->nf_bridge == NULL) in nf_bridge_get_physoutif()
38 nf_bridge = skb->nf_bridge; in nf_bridge_get_physoutif()
[all …]
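The accessors matched above expose which physical bridge port a frame entered or will leave through (xt_physdev-style matching). A hedged sketch, not from the tree; the helper name is illustrative:

	#include <linux/netfilter_bridge.h>

	/* True if the bridged frame physically arrived on the given interface. */
	static bool example_came_from_port(const struct sk_buff *skb, int ifindex)
	{
		return nf_bridge_get_physinif(skb) == ifindex;
	}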
virtio_net.h
25 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, in virtio_net_hdr_set_proto() argument
28 if (skb->protocol) in virtio_net_hdr_set_proto()
34 skb->protocol = cpu_to_be16(ETH_P_IP); in virtio_net_hdr_set_proto()
37 skb->protocol = cpu_to_be16(ETH_P_IPV6); in virtio_net_hdr_set_proto()
46 static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, in virtio_net_hdr_to_skb() argument
83 skb_reset_mac_header(skb); in virtio_net_hdr_to_skb()
90 if (!pskb_may_pull(skb, needed)) in virtio_net_hdr_to_skb()
93 if (!skb_partial_csum_set(skb, start, off)) in virtio_net_hdr_to_skb()
96 p_off = skb_transport_offset(skb) + thlen; in virtio_net_hdr_to_skb()
97 if (!pskb_may_pull(skb, p_off)) in virtio_net_hdr_to_skb()
[all …]
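virtio_net_hdr_to_skb(), matched above, is what tap/virtio receive paths call to turn the guest-supplied header into checksum and GSO metadata on the skb. A hedged sketch, assuming 4.19's three-argument signature; the wrapper is illustrative:

	#include <linux/skbuff.h>
	#include <linux/virtio_net.h>
	#include <linux/virtio_byteorder.h>

	/* Returns 0 on success, -EINVAL if the header does not fit the packet. */
	static int example_apply_vnet_hdr(struct sk_buff *skb,
					  const struct virtio_net_hdr *hdr)
	{
		return virtio_net_hdr_to_skb(skb, hdr,
					     virtio_legacy_is_little_endian());
	}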
/linux-4.19.296/drivers/bluetooth/
btbcm.c
42 struct sk_buff *skb; in btbcm_check_bdaddr() local
44 skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, in btbcm_check_bdaddr()
46 if (IS_ERR(skb)) { in btbcm_check_bdaddr()
47 int err = PTR_ERR(skb); in btbcm_check_bdaddr()
52 if (skb->len != sizeof(*bda)) { in btbcm_check_bdaddr()
54 kfree_skb(skb); in btbcm_check_bdaddr()
58 bda = (struct hci_rp_read_bd_addr *)skb->data; in btbcm_check_bdaddr()
81 kfree_skb(skb); in btbcm_check_bdaddr()
89 struct sk_buff *skb; in btbcm_set_bdaddr() local
92 skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); in btbcm_set_bdaddr()
[all …]
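btbcm_check_bdaddr(), matched above, follows the standard synchronous HCI command pattern. A condensed sketch of that pattern, assuming the Bluetooth core headers; not a drop-in copy of the driver function:

	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/hci_core.h>

	static int example_read_bdaddr(struct hci_dev *hdev, bdaddr_t *out)
	{
		struct hci_rp_read_bd_addr *rp;
		struct sk_buff *skb;

		skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
				     HCI_INIT_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);	/* command failed or timed out */

		if (skb->len != sizeof(*rp)) {
			kfree_skb(skb);		/* reply skb is always owned by the caller */
			return -EIO;
		}

		rp = (struct hci_rp_read_bd_addr *)skb->data;
		bacpy(out, &rp->bdaddr);
		kfree_skb(skb);
		return 0;
	}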
h4_recv.h
32 int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
57 struct sk_buff *skb, in h4_recv_buf() argument
64 if (IS_ERR(skb)) in h4_recv_buf()
65 skb = NULL; in h4_recv_buf()
73 if (!skb) { in h4_recv_buf()
78 skb = bt_skb_alloc((&pkts[i])->maxlen, in h4_recv_buf()
80 if (!skb) in h4_recv_buf()
83 hci_skb_pkt_type(skb) = (&pkts[i])->type; in h4_recv_buf()
84 hci_skb_expect(skb) = (&pkts[i])->hlen; in h4_recv_buf()
89 if (!skb) in h4_recv_buf()
[all …]
btintel.c
41 struct sk_buff *skb; in btintel_check_bdaddr() local
43 skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, in btintel_check_bdaddr()
45 if (IS_ERR(skb)) { in btintel_check_bdaddr()
46 int err = PTR_ERR(skb); in btintel_check_bdaddr()
52 if (skb->len != sizeof(*bda)) { in btintel_check_bdaddr()
54 kfree_skb(skb); in btintel_check_bdaddr()
58 bda = (struct hci_rp_read_bd_addr *)skb->data; in btintel_check_bdaddr()
71 kfree_skb(skb); in btintel_check_bdaddr()
80 struct sk_buff *skb; in btintel_enter_mfg() local
82 skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT); in btintel_enter_mfg()
[all …]
hci_h4.c
104 static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) in h4_enqueue() argument
108 BT_DBG("hu %p skb %p", hu, skb); in h4_enqueue()
111 memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); in h4_enqueue()
112 skb_queue_tail(&h4->txq, skb); in h4_enqueue()
170 struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, in h4_recv_buf() argument
178 if (IS_ERR(skb)) in h4_recv_buf()
179 skb = NULL; in h4_recv_buf()
192 if (!skb) { in h4_recv_buf()
197 skb = bt_skb_alloc((&pkts[i])->maxlen, in h4_recv_buf()
199 if (!skb) in h4_recv_buf()
[all …]
btrsi.c
49 static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb) in rsi_hci_send_pkt() argument
54 switch (hci_skb_pkt_type(skb)) { in rsi_hci_send_pkt()
66 if (skb_headroom(skb) < RSI_HEADROOM_FOR_BT_HAL) { in rsi_hci_send_pkt()
68 new_skb = skb_realloc_headroom(skb, RSI_HEADROOM_FOR_BT_HAL); in rsi_hci_send_pkt()
71 bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb); in rsi_hci_send_pkt()
72 kfree_skb(skb); in rsi_hci_send_pkt()
73 skb = new_skb; in rsi_hci_send_pkt()
74 if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) { in rsi_hci_send_pkt()
75 u8 *skb_data = skb->data; in rsi_hci_send_pkt()
76 int skb_len = skb->len; in rsi_hci_send_pkt()
[all …]
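rsi_hci_send_pkt(), matched above, guards against insufficient skb headroom before prepending its HAL header. A hedged sketch of that idiom; the headroom constant and function name are illustrative:

	#include <linux/skbuff.h>

	#define EXAMPLE_NEEDED_HEADROOM 16	/* illustrative; the driver uses RSI_HEADROOM_FOR_BT_HAL */

	/* Returns an skb with at least the required headroom, or NULL on OOM
	 * (in which case the original skb is left untouched for the caller). */
	static struct sk_buff *example_ensure_headroom(struct sk_buff *skb)
	{
		struct sk_buff *new_skb;

		if (skb_headroom(skb) >= EXAMPLE_NEEDED_HEADROOM)
			return skb;

		new_skb = skb_realloc_headroom(skb, EXAMPLE_NEEDED_HEADROOM);
		if (!new_skb)
			return NULL;

		kfree_skb(skb);		/* realloc made a private copy */
		return new_skb;
	}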
hci_vhci.c
80 static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) in vhci_send_frame() argument
84 memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); in vhci_send_frame()
85 skb_queue_tail(&data->readq, skb); in vhci_send_frame()
94 struct sk_buff *skb; in __vhci_create_device() local
110 skb = bt_skb_alloc(4, GFP_KERNEL); in __vhci_create_device()
111 if (!skb) in __vhci_create_device()
116 kfree_skb(skb); in __vhci_create_device()
143 kfree_skb(skb); in __vhci_create_device()
147 hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; in __vhci_create_device()
149 skb_put_u8(skb, 0xff); in __vhci_create_device()
[all …]
/linux-4.19.296/include/net/
llc_c_ev.h
123 static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb) in llc_conn_ev() argument
125 return (struct llc_conn_state_ev *)skb->cb; in llc_conn_ev()
128 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
129 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
131 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
132 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
133 int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
134 int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
135 int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
136 int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
[all …]
llc_c_ac.h
90 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
92 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
93 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
94 int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
95 int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
96 int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
97 int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
98 int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
100 struct sk_buff *skb);
102 struct sk_buff *skb);
[all …]
udp.h
112 static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) in __udp_lib_checksum_complete() argument
114 return (UDP_SKB_CB(skb)->cscov == skb->len ? in __udp_lib_checksum_complete()
115 __skb_checksum_complete(skb) : in __udp_lib_checksum_complete()
116 __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov)); in __udp_lib_checksum_complete()
119 static inline int udp_lib_checksum_complete(struct sk_buff *skb) in udp_lib_checksum_complete() argument
121 return !skb_csum_unnecessary(skb) && in udp_lib_checksum_complete()
122 __udp_lib_checksum_complete(skb); in udp_lib_checksum_complete()
131 static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb) in udp_csum_outgoing() argument
133 __wsum csum = csum_partial(skb_transport_header(skb), in udp_csum_outgoing()
135 skb_queue_walk(&sk->sk_write_queue, skb) { in udp_csum_outgoing()
[all …]
udplite.h
21 int len, int odd, struct sk_buff *skb) in udplite_getfrag() argument
30 static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) in udplite_checksum_init() argument
46 else if (cscov < 8 || cscov > skb->len) { in udplite_checksum_init()
51 cscov, skb->len); in udplite_checksum_init()
54 } else if (cscov < skb->len) { in udplite_checksum_init()
55 UDP_SKB_CB(skb)->partial_cov = 1; in udplite_checksum_init()
56 UDP_SKB_CB(skb)->cscov = cscov; in udplite_checksum_init()
57 if (skb->ip_summed == CHECKSUM_COMPLETE) in udplite_checksum_init()
58 skb->ip_summed = CHECKSUM_NONE; in udplite_checksum_init()
59 skb->csum_valid = 0; in udplite_checksum_init()
[all …]
netlink.h
240 int netlink_rcv_skb(struct sk_buff *skb,
243 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
259 struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
260 struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
262 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
263 struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
264 struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
266 void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
267 void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
269 void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
[all …]
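The nla_put*/nla_reserve* declarations matched above are the attribute-building half of the netlink API. A minimal sketch of filling attributes into a message skb; the attribute type numbers are purely illustrative:

	#include <net/netlink.h>

	static int example_fill_attrs(struct sk_buff *skb, u32 ifindex, const char *name)
	{
		if (nla_put_u32(skb, 1, ifindex))	/* attr type 1: hypothetical */
			return -EMSGSIZE;
		if (nla_put_string(skb, 2, name))	/* attr type 2: hypothetical */
			return -EMSGSIZE;
		return 0;
	}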
l3mdev.h
32 struct sk_buff *skb, u16 proto);
34 struct sock *sk, struct sk_buff *skb,
139 struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) in l3mdev_l3_rcv() argument
143 if (netif_is_l3_slave(skb->dev)) in l3mdev_l3_rcv()
144 master = netdev_master_upper_dev_get_rcu(skb->dev); in l3mdev_l3_rcv()
145 else if (netif_is_l3_master(skb->dev) || in l3mdev_l3_rcv()
146 netif_has_l3_rx_handler(skb->dev)) in l3mdev_l3_rcv()
147 master = skb->dev; in l3mdev_l3_rcv()
150 skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto); in l3mdev_l3_rcv()
152 return skb; in l3mdev_l3_rcv()
[all …]
ip6_checksum.h
43 static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) in ip6_compute_pseudo() argument
45 return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in ip6_compute_pseudo()
46 &ipv6_hdr(skb)->daddr, in ip6_compute_pseudo()
47 skb->len, proto, 0)); in ip6_compute_pseudo()
50 static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto) in ip6_gro_compute_pseudo() argument
52 const struct ipv6hdr *iph = skb_gro_network_header(skb); in ip6_gro_compute_pseudo()
55 skb_gro_len(skb), proto, 0)); in ip6_gro_compute_pseudo()
66 static inline void __tcp_v6_send_check(struct sk_buff *skb, in __tcp_v6_send_check() argument
70 struct tcphdr *th = tcp_hdr(skb); in __tcp_v6_send_check()
72 if (skb->ip_summed == CHECKSUM_PARTIAL) { in __tcp_v6_send_check()
[all …]
llc_pdu.h
206 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) in llc_pdu_sn_hdr() argument
208 return (struct llc_pdu_sn *)skb_network_header(skb); in llc_pdu_sn_hdr()
218 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) in llc_pdu_un_hdr() argument
220 return (struct llc_pdu_un *)skb_network_header(skb); in llc_pdu_un_hdr()
233 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, in llc_pdu_header_init() argument
248 skb_push(skb, hlen); in llc_pdu_header_init()
249 skb_reset_network_header(skb); in llc_pdu_header_init()
250 pdu = llc_pdu_un_hdr(skb); in llc_pdu_header_init()
263 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) in llc_pdu_decode_sa() argument
265 if (skb->protocol == htons(ETH_P_802_2)) in llc_pdu_decode_sa()
[all …]
inet_ecn.h
122 static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) in IP6_ECN_set_ce() argument
132 if (skb->ip_summed == CHECKSUM_COMPLETE) in IP6_ECN_set_ce()
133 skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), in IP6_ECN_set_ce()
144 static inline int INET_ECN_set_ce(struct sk_buff *skb) in INET_ECN_set_ce() argument
146 switch (skb_protocol(skb, true)) { in INET_ECN_set_ce()
148 if (skb_network_header(skb) + sizeof(struct iphdr) <= in INET_ECN_set_ce()
149 skb_tail_pointer(skb)) in INET_ECN_set_ce()
150 return IP_ECN_set_ce(ip_hdr(skb)); in INET_ECN_set_ce()
154 if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= in INET_ECN_set_ce()
155 skb_tail_pointer(skb)) in INET_ECN_set_ce()
[all …]
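INET_ECN_set_ce(), matched above, is what AQM qdiscs such as sch_red call when they decide to mark rather than drop. A hedged one-line wrapper to show the contract; the name is illustrative:

	#include <net/inet_ecn.h>

	/* Returns true if the packet was ECN-capable and CE was set; false means
	 * the caller has to fall back to dropping. */
	static bool example_try_ecn_mark(struct sk_buff *skb)
	{
		return INET_ECN_set_ce(skb) != 0;
	}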
sch_generic.h
58 int (*enqueue)(struct sk_buff *skb,
206 struct sk_buff *skb, struct tcmsg*);
218 int (*enqueue)(struct sk_buff *skb,
302 struct sk_buff *skb, struct tcmsg*);
303 int (*tmplt_dump)(struct sk_buff *skb,
399 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) in qdisc_cb_private_validate() argument
403 BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); in qdisc_cb_private_validate()
424 static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) in qdisc_skb_cb() argument
426 return (struct qdisc_skb_cb *)skb->cb; in qdisc_skb_cb()
587 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
[all …]
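qdisc_skb_cb() and qdisc_cb_private_validate(), matched above, support the pattern (used by sch_netem and friends) of stashing per-packet scheduler state behind the generic cb header. A hedged sketch; the struct and field names are illustrative:

	#include <net/sch_generic.h>

	struct example_skb_cb {
		u64 time_to_send;	/* illustrative per-packet state */
	};

	static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
	{
		/* Build-time check that skb->cb fits the qdisc header plus our state. */
		qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
	}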
/linux-4.19.296/include/trace/events/
net.h
16 TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
18 TP_ARGS(skb, dev),
42 __entry->queue_mapping = skb->queue_mapping;
43 __entry->skbaddr = skb;
44 __entry->vlan_tagged = skb_vlan_tag_present(skb);
45 __entry->vlan_proto = ntohs(skb->vlan_proto);
46 __entry->vlan_tci = skb_vlan_tag_get(skb);
47 __entry->protocol = ntohs(skb->protocol);
48 __entry->ip_summed = skb->ip_summed;
49 __entry->len = skb->len;
[all …]
/linux-4.19.296/include/net/netfilter/
br_netfilter.h
7 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) in nf_bridge_alloc() argument
9 skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC); in nf_bridge_alloc()
11 if (likely(skb->nf_bridge)) in nf_bridge_alloc()
12 refcount_set(&(skb->nf_bridge->use), 1); in nf_bridge_alloc()
14 return skb->nf_bridge; in nf_bridge_alloc()
17 void nf_bridge_update_protocol(struct sk_buff *skb);
20 struct sk_buff *skb, struct net_device *indev,
26 nf_bridge_info_get(const struct sk_buff *skb) in nf_bridge_info_get() argument
28 return skb->nf_bridge; in nf_bridge_info_get()
31 unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
[all …]
/linux-4.19.296/drivers/misc/sgi-xp/
xpnet.c
95 struct sk_buff *skb; member
151 struct sk_buff *skb; in xpnet_receive() local
169 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); in xpnet_receive()
170 if (!skb) { in xpnet_receive()
186 skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & in xpnet_receive()
194 skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); in xpnet_receive()
202 "%lu)\n", skb->data, &msg->data, in xpnet_receive()
205 skb_copy_to_linear_data(skb, &msg->data, in xpnet_receive()
208 dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1)); in xpnet_receive()
233 "skb->end=0x%p skb->len=%d\n", (void *)skb->head, in xpnet_receive()
[all …]
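xpnet_receive(), matched above, over-allocates and then uses skb_reserve() so that skb->data starts on an L1 cache line before skb_put() claims the payload. A hedged sketch of that allocation idiom; the function name is illustrative:

	#include <linux/skbuff.h>
	#include <linux/cache.h>

	static struct sk_buff *example_alloc_aligned(unsigned int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + L1_CACHE_BYTES);

		if (!skb)
			return NULL;

		/* Advance data until it sits on a cache-line boundary. */
		skb_reserve(skb, L1_CACHE_BYTES -
				 ((unsigned long)skb->data & (L1_CACHE_BYTES - 1)));
		skb_put(skb, len);
		return skb;
	}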
/linux-4.19.296/drivers/isdn/gigaset/
asyncdata.c
144 struct sk_buff *skb = bcs->rx_skb; in hdlc_loop() local
204 if (!skb) { in hdlc_loop()
207 } else if (skb->len < 2) { in hdlc_loop()
211 skb->len); in hdlc_loop()
213 dev_kfree_skb_any(skb); in hdlc_loop()
218 skb->len); in hdlc_loop()
220 dev_kfree_skb_any(skb); in hdlc_loop()
223 __skb_trim(skb, skb->len - 2); in hdlc_loop()
224 gigaset_skb_rcvd(bcs, skb); in hdlc_loop()
229 skb = gigaset_new_rx_skb(bcs); in hdlc_loop()
[all …]
/linux-4.19.296/drivers/isdn/mISDN/
layer2.c
147 l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb) in l2up() argument
153 mISDN_HEAD_PRIM(skb) = prim; in l2up()
154 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr; in l2up()
155 err = l2->up->send(l2->up, skb); in l2up()
159 dev_kfree_skb(skb); in l2up()
166 struct sk_buff *skb; in l2up_create() local
172 skb = mI_alloc_skb(len, GFP_ATOMIC); in l2up_create()
173 if (!skb) in l2up_create()
175 hh = mISDN_HEAD_P(skb); in l2up_create()
179 skb_put_data(skb, arg, len); in l2up_create()
[all …]
