/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			protocol;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
	ipcm->protocol = inet->inet_num;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field, and then processed like
   local ones; but only if someone wants them!  Otherwise a router that
   is not running rsvpd will kill RSVP.

   What to do with them is a user-level problem.
   I have no idea how it would masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough, e.g., to forward mtrace requests
   sent to a multicast group in order to reach the destination designated
   router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};
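
/*
 * Editorial sketch (illustrative, not part of this header): a user-level
 * daemon such as rsvpd typically joins this chain by enabling the
 * IP_ROUTER_ALERT socket option, which reaches ip_ra_control() declared
 * further below:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 *
 * ip_call_ra_chain() then hands matching packets to every registered socket.
 */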

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/
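
/*
 * Editorial sketch: frag_off is kept in network byte order and the offset
 * part counts 8-octet units, so a caller recovers the flags and the byte
 * offset roughly like this:
 *
 *	bool more_frags = ip_hdr(skb)->frag_off & htons(IP_MF);
 *	unsigned int offset = (ntohs(ip_hdr(skb)->frag_off) & IP_OFFSET) * 8;
 */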

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
				struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	return 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}
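
/*
 * Editorial sketch of the arithmetic above (not from the original source):
 * ttl shares a 16-bit header word with the protocol field, so dropping ttl
 * by one lowers that word by 0x0100 and the one's-complement checksum has
 * to grow by htons(0x0100); the "check >= 0xFFFF" term supplies the
 * end-around carry and the 0xFFFF boundary case that the 2.2 code
 * mishandled at check == 0xFEFF.
 */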

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (!mtu)
		mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
		       u32 *metrics);

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	/* We had many attacks based on IPID, use the private
	 * generator as much as we can.
	 */
	if (sk && inet_sk(sk)->inet_daddr) {
		iph->id = htons(inet_sk(sk)->inet_id);
		inet_sk(sk)->inet_id += segs;
		return;
	}
	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		iph->id = 0;
	} else {
		/* Unfortunately we need the big hammer to get a suitable IPID */
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
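
/*
 * Worked example (editorial, illustrative only): 224.10.8.5 carries the low
 * 23 bits 0x0a0805, so the mapping above yields 01:00:5e:0a:08:05. The upper
 * nine bits of the group address are not mapped; four of them are the fixed
 * 1110 class-D prefix, so 32 distinct IPv4 groups share each Ethernet
 * multicast MAC.
 */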

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}
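
/*
 * Editorial sketch: conntrack reserves a USHRT_MAX-wide window per direction
 * so a zone id can be folded into 'user' (e.g. IP_DEFRAG_CONNTRACK_IN + zone);
 * a caller would then test membership roughly as
 *
 *	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				      __IP_DEFRAG_CONNTRACK_IN_END))
 *		...
 */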

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

#endif /* _IP_H */