#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO	(30*HZ)

/* Used to memset ip_tunnel_key padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(FIELD_SIZEOF(struct ip_tunnel_key, u) -		\
	 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	__be16			tun_flags;
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be16			tp_src;
	__be16			tp_dst;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	u8			options_len;
	u8			mode;
};

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
	struct in6_addr		prefix;
	__be32			relay_prefix;
	u16			prefixlen;
	u16			relay_prefixlen;
};
#endif

struct ip_tunnel_encap {
	u16			type;
	u16			flags;
	__be16			sport;
	__be16			dport;
};

struct ip_tunnel_prl_entry {
	struct ip_tunnel_prl_entry __rcu *next;
	__be32				addr;
	u16				flags;
	struct rcu_head			rcu_head;
};

struct ip_tunnel_dst {
	struct dst_entry __rcu		*dst;
	__be32				saddr;
};

struct metadata_dst;

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node hash_node;
	struct net_device	*dev;
	struct net		*net;	/* netns for packet i/o */

	int		err_count;	/* Number of ICMP errors received */
	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */

	/* These four fields are used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	u32		o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */
	int		mlink;

	struct ip_tunnel_dst __percpu *dst_cache;

	struct ip_tunnel_parm parms;

	int		encap_hlen;	/* Encap header length (FOU, GUE) */
	struct ip_tunnel_encap encap;

	int		hlen;		/* tun_hlen + encap_hlen */

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int		prl_count;	/* # of entries in PRL */
	int			ip_tnl_net_id;
	struct gro_cells	gro_cells;
	bool			collect_md;
};

#define TUNNEL_CSUM		__cpu_to_be16(0x01)
#define TUNNEL_ROUTING		__cpu_to_be16(0x02)
#define TUNNEL_KEY		__cpu_to_be16(0x04)
#define TUNNEL_SEQ		__cpu_to_be16(0x08)
#define TUNNEL_STRICT		__cpu_to_be16(0x10)
#define TUNNEL_REC		__cpu_to_be16(0x20)
#define TUNNEL_VERSION		__cpu_to_be16(0x40)
#define TUNNEL_NO_KEY		__cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT	__cpu_to_be16(0x0100)
#define TUNNEL_OAM		__cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT		__cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT	__cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT	__cpu_to_be16(0x1000)

#define TUNNEL_OPTIONS_PRESENT	(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)

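/*
 * Illustrative usage sketch (not part of the API): code that parses or
 * builds tunnel headers typically tests these bits against the __be16
 * flag word carried in struct tnl_ptk_info or struct ip_tunnel_key, e.g.
 *
 *	if (tpi->flags & TUNNEL_KEY)
 *		key = tpi->key;
 *	if (info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)
 *		opts = ip_tunnel_info_opts(info);
 */
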
struct tnl_ptk_info {
	__be16 flags;
	__be16 proto;
	__be32 key;
	__be32 seq;
};

#define PACKET_RCVD	0
#define PACKET_REJECT	1

#define IP_TNL_HASH_BITS   7
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
	struct net_device *fb_tunnel_dev;
	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
	struct ip_tunnel __rcu *collect_md_tun;
};

struct ip_tunnel_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4);
};

#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
			    unsigned int num);

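/*
 * Illustrative sketch, modelled on how an encapsulation module (e.g. FOU)
 * plugs into this table; the callback names are hypothetical:
 *
 *	static const struct ip_tunnel_encap_ops my_encap_ops = {
 *		.encap_hlen   = my_encap_hlen,
 *		.build_header = my_build_header,
 *	};
 *
 *	err = ip_tunnel_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip_tunnel_encap_del_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *
 * "num" is the encapsulation type (a TUNNEL_ENCAP_* value from
 * <linux/if_tunnel.h>) and must be below MAX_IPTUN_ENCAP_OPS.
 */
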
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id, __be16 tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->tun_flags = tun_flags;

	/* For tunnel types on top of IPsec, the tp_src and tp_dst of the
	 * upper tunnel are used.
	 * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}

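/*
 * Illustrative sketch (hypothetical values): a collect_md transmit path
 * could describe an IPv4 encapsulation like this before handing the skb
 * to a metadata-based tunnel device:
 *
 *	ip_tunnel_key_init(&info->key,
 *			   saddr, daddr,	// outer IPv4 addresses
 *			   0,			// tos
 *			   64,			// ttl
 *			   0, htons(4789),	// tp_src, tp_dst (e.g. VXLAN port)
 *			   cpu_to_be64(42),	// tun_id
 *			   TUNNEL_KEY);		// tun_flags
 *
 * Besides filling the fields, the helper clears the unused part of the
 * address union and any trailing struct padding, so a key can be compared
 * or hashed as a flat byte range of IP_TUNNEL_KEY_SIZE bytes.
 */
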
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
					       *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

#ifdef CONFIG_INET

int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
		       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
		    const struct iphdr *tnl_params, const u8 protocol);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
		    u8 *protocol, struct flowi4 *fl4);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
				   int link, __be16 flags,
				   __be32 remote, __be32 local,
				   __be32 key);

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
			 struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
		      struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
			  struct ip_tunnel_encap *ipencap);

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}

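/*
 * Illustrative sketch: on transmit, the outer TOS is typically derived
 * from the configured tos and the inner header so that ECN marking is
 * carried into the encapsulating header, roughly:
 *
 *	tos = ip_tunnel_ecn_encap(tnl_params->tos, inner_iph, skb);
 *	// tos is then written into the outer IPv4 header
 */
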
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, u8 proto,
		  u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags);

struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
					 int gso_type_mask);

static inline void iptunnel_xmit_stats(int err,
				       struct net_device_stats *err_stats,
				       struct pcpu_sw_netstats __percpu *stats)
{
	if (err > 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += err;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else if (err < 0) {
		err_stats->tx_errors++;
		err_stats->tx_aborted_errors++;
	} else {
		err_stats->tx_dropped++;
	}
}

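/*
 * Illustrative sketch: the err argument is interpreted as the number of
 * bytes transmitted when positive, a transmit error when negative and a
 * dropped packet when zero.  The usual pattern is to feed it the return
 * value of a transmit helper, e.g.:
 *
 *	err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol,
 *			    tos, ttl, df, xnet);
 *	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 *
 * (dev->tstats here is assumed to be the device's per-CPU
 * pcpu_sw_netstats.)
 */
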
static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
	return info + 1;
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len)
{
	memcpy(ip_tunnel_info_opts(info), from, len);
	info->options_len = len;
}

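/*
 * Illustrative sketch: tunnel option metadata (e.g. Geneve TLVs) is stored
 * immediately after struct ip_tunnel_info, so setting and reading it is a
 * plain copy; "opts" and "len" are hypothetical caller-provided values:
 *
 *	ip_tunnel_info_opts_set(info, opts, len);  // copies in, sets options_len
 *	...
 *	ip_tunnel_info_opts_get(buf, info);        // copies options_len bytes out
 *
 * The caller must have allocated room for the options behind the
 * ip_tunnel_info, e.g. via a metadata_dst allocated with space for them.
 */
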
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return (struct ip_tunnel_info *)lwtstate->data;
}

extern struct static_key ip_tunnel_metadata_cnt;

/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
	return static_key_false(&ip_tunnel_metadata_cnt);
}

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);

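/*
 * Note (summary, not a definition): ip_tunnel_need_metadata() and
 * ip_tunnel_unneed_metadata() are expected to increment/decrement the
 * static key above, so that fast paths can use
 * ip_tunnel_collect_metadata() to decide cheaply whether any user
 * currently wants tunnel metadata collected.
 */
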
#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
	return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */