ipv4: support for fib route lwtunnel encap attributes
[deliverable/linux.git] / net / ipv4 / fib_frontend.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IPv4 Forwarding Information Base: FIB frontend.
7 *
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #include <linux/module.h>
17 #include <asm/uaccess.h>
18 #include <linux/bitops.h>
19 #include <linux/capability.h>
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/errno.h>
27 #include <linux/in.h>
28 #include <linux/inet.h>
29 #include <linux/inetdevice.h>
30 #include <linux/netdevice.h>
31 #include <linux/if_addr.h>
32 #include <linux/if_arp.h>
33 #include <linux/skbuff.h>
34 #include <linux/cache.h>
35 #include <linux/init.h>
36 #include <linux/list.h>
37 #include <linux/slab.h>
38
39 #include <net/ip.h>
40 #include <net/protocol.h>
41 #include <net/route.h>
42 #include <net/tcp.h>
43 #include <net/sock.h>
44 #include <net/arp.h>
45 #include <net/ip_fib.h>
46 #include <net/rtnetlink.h>
47 #include <net/xfrm.h>
48
49 #ifndef CONFIG_IP_MULTIPLE_TABLES
50
/* Single-table configuration (no CONFIG_IP_MULTIPLE_TABLES): create the
 * MAIN and LOCAL tries and hook them into the per-netns table hash.
 * NOTE(review): LOCAL is created with MAIN passed as its second argument
 * (alias) -- presumably so local lookups can share/fall back to main;
 * confirm against fib_trie_table().
 *
 * Returns 0 on success or -ENOMEM if either table cannot be allocated.
 */
static int __net_init fib4_rules_init(struct net *net)
{
	struct fib_table *local_table, *main_table;

	main_table  = fib_trie_table(RT_TABLE_MAIN, NULL);
	if (!main_table)
		return -ENOMEM;

	local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
	if (!local_table)
		goto fail;

	hlist_add_head_rcu(&local_table->tb_hlist,
				&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
	hlist_add_head_rcu(&main_table->tb_hlist,
				&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
	return 0;

fail:
	/* local table allocation failed; undo the main table */
	fib_free_table(main_table);
	return -ENOMEM;
}
73 #else
74
/* Return the table with the given id, creating it on demand and linking
 * it into the per-netns table hash.  id == 0 is treated as RT_TABLE_MAIN.
 * The three well-known tables are additionally cached in dedicated RCU
 * pointers for direct access.
 *
 * NOTE(review): creating RT_TABLE_LOCAL first materializes RT_TABLE_MAIN
 * and passes it to fib_trie_table() as 'alias' -- confirm the aliasing
 * semantics against fib_trie_table().
 */
struct fib_table *fib_new_table(struct net *net, u32 id)
{
	struct fib_table *tb, *alias = NULL;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	tb = fib_get_table(net, id);
	if (tb)
		return tb;

	if (id == RT_TABLE_LOCAL)
		alias = fib_new_table(net, RT_TABLE_MAIN);

	tb = fib_trie_table(id, alias);
	if (!tb)
		return NULL;

	/* cache the well-known tables for fast lookup-free access */
	switch (id) {
	case RT_TABLE_LOCAL:
		rcu_assign_pointer(net->ipv4.fib_local, tb);
		break;
	case RT_TABLE_MAIN:
		rcu_assign_pointer(net->ipv4.fib_main, tb);
		break;
	case RT_TABLE_DEFAULT:
		rcu_assign_pointer(net->ipv4.fib_default, tb);
		break;
	default:
		break;
	}

	h = id & (FIB_TABLE_HASHSZ - 1);
	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
	return tb;
}
111
112 /* caller must hold either rtnl or rcu read lock */
113 struct fib_table *fib_get_table(struct net *net, u32 id)
114 {
115 struct fib_table *tb;
116 struct hlist_head *head;
117 unsigned int h;
118
119 if (id == 0)
120 id = RT_TABLE_MAIN;
121 h = id & (FIB_TABLE_HASHSZ - 1);
122
123 head = &net->ipv4.fib_table_hash[h];
124 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
125 if (tb->tb_id == id)
126 return tb;
127 }
128 return NULL;
129 }
130 #endif /* CONFIG_IP_MULTIPLE_TABLES */
131
/* Atomically (w.r.t. RCU readers) substitute 'new' for 'old': update the
 * cached well-known table pointers, then swap the entry in the table
 * hash.  Readers observe either the old or the new table at all times.
 */
static void fib_replace_table(struct net *net, struct fib_table *old,
			      struct fib_table *new)
{
#ifdef CONFIG_IP_MULTIPLE_TABLES
	switch (new->tb_id) {
	case RT_TABLE_LOCAL:
		rcu_assign_pointer(net->ipv4.fib_local, new);
		break;
	case RT_TABLE_MAIN:
		rcu_assign_pointer(net->ipv4.fib_main, new);
		break;
	case RT_TABLE_DEFAULT:
		rcu_assign_pointer(net->ipv4.fib_default, new);
		break;
	default:
		break;
	}

#endif
	/* replace the old table in the hlist */
	hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist);
}
154
155 int fib_unmerge(struct net *net)
156 {
157 struct fib_table *old, *new;
158
159 /* attempt to fetch local table if it has been allocated */
160 old = fib_get_table(net, RT_TABLE_LOCAL);
161 if (!old)
162 return 0;
163
164 new = fib_trie_unmerge(old);
165 if (!new)
166 return -ENOMEM;
167
168 /* replace merged table with clean table */
169 if (new != old) {
170 fib_replace_table(net, old, new);
171 fib_free_table(old);
172 }
173
174 return 0;
175 }
176
/* Flush all routing entries from every table in this netns.  The route
 * cache is flushed only if at least one entry was actually removed.
 */
static void fib_flush(struct net *net)
{
	int flushed = 0;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct hlist_node *tmp;
		struct fib_table *tb;

		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
			flushed += fib_table_flush(tb);
	}

	if (flushed)
		rt_cache_flush(net);
}
194
195 void fib_flush_external(struct net *net)
196 {
197 struct fib_table *tb;
198 struct hlist_head *head;
199 unsigned int h;
200
201 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
202 head = &net->ipv4.fib_table_hash[h];
203 hlist_for_each_entry(tb, head, tb_hlist)
204 fib_table_flush_external(tb);
205 }
206 }
207
/*
 * Find address type as if only "dev" was present in the system. If
 * dev is NULL then all interfaces are taken into consideration.
 * Returns an RTN_* value: broadcast/multicast are decided from the
 * address itself, everything else from a LOCAL-table lookup.
 */
static inline unsigned int __inet_dev_addr_type(struct net *net,
						const struct net_device *dev,
						__be32 addr)
{
	struct flowi4		fl4 = { .daddr = addr };
	struct fib_result	res;
	unsigned int ret = RTN_BROADCAST;
	struct fib_table *local_table;

	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
		return RTN_BROADCAST;
	if (ipv4_is_multicast(addr))
		return RTN_MULTICAST;

	rcu_read_lock();

	local_table = fib_get_table(net, RT_TABLE_LOCAL);
	if (local_table) {
		/* default to unicast unless the local table says otherwise */
		ret = RTN_UNICAST;
		if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
			/* only accept the result if it matches "dev" (or no
			 * device restriction was requested) */
			if (!dev || dev == res.fi->fib_dev)
				ret = res.type;
		}
	}

	rcu_read_unlock();
	return ret;
}
240
/* Address type of 'addr' considering all interfaces in 'net'. */
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
	return __inet_dev_addr_type(net, NULL, addr);
}
EXPORT_SYMBOL(inet_addr_type);
246
/* Address type of 'addr' as seen from the given device only. */
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
				__be32 addr)
{
	return __inet_dev_addr_type(net, dev, addr);
}
EXPORT_SYMBOL(inet_dev_addr_type);
253
/* Compute the "specific destination" address for a received packet.
 * For packets delivered to a local address this is simply the IP header
 * daddr.  Otherwise a fib lookup keyed on the packet's *source* address
 * is attempted to find a preferred source; failing that, fall back to
 * inet_select_addr() on the incoming device.
 * Called with an in_device attached to skb->dev (BUG otherwise).
 */
__be32 fib_compute_spec_dst(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct rtable *rt;
	struct flowi4 fl4;
	struct net *net;
	int scope;

	rt = skb_rtable(skb);
	/* purely local delivery: destination of the packet itself */
	if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
	    RTCF_LOCAL)
		return ip_hdr(skb)->daddr;

	in_dev = __in_dev_get_rcu(dev);
	BUG_ON(!in_dev);

	net = dev_net(dev);

	scope = RT_SCOPE_UNIVERSE;
	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
		/* reverse lookup: route back towards the packet's source */
		fl4.flowi4_oif = 0;
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		fl4.daddr = ip_hdr(skb)->saddr;
		fl4.saddr = 0;
		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
		fl4.flowi4_scope = scope;
		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
		if (!fib_lookup(net, &fl4, &res, 0))
			return FIB_RES_PREFSRC(net, res);
	} else {
		/* zeronet source: restrict fallback selection to link scope */
		scope = RT_SCOPE_LINK;
	}

	return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
}
291
/* Given (packet source, input interface) and optional (dst, oif, tos):
 * - (main) check, that source is valid i.e. not broadcast or our local
 *   address.
 * - figure out what "logical" interface this packet arrived
 *   and calculate "specific destination" address.
 * - check, that packet arrived from expected physical interface.
 * called with rcu_read_lock()
 *
 * Return: >= 0 on acceptance (1 if the matched nexthop has host or
 * narrower scope, else 0), -EINVAL for an invalid source, -EXDEV when
 * the reverse-path filter rejects the packet.
 */
static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
				 u8 tos, int oif, struct net_device *dev,
				 int rpf, struct in_device *idev, u32 *itag)
{
	int ret, no_addr;
	struct fib_result res;
	struct flowi4 fl4;
	struct net *net;
	bool dev_match;

	/* deliberate swap: we route back towards the packet's source, so
	 * the lookup daddr is src and the lookup saddr is dst */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
	fl4.daddr = src;
	fl4.saddr = dst;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;

	no_addr = idev->ifa_list == NULL;

	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;

	net = dev_net(dev);
	if (fib_lookup(net, &fl4, &res, 0))
		goto last_resort;
	if (res.type != RTN_UNICAST &&
	    (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
		goto e_inval;
	if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
		goto last_resort;
	fib_combine_itag(itag, &res);
	dev_match = false;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	/* any nexthop of the multipath route may match the arrival device */
	for (ret = 0; ret < res.fi->fib_nhs; ret++) {
		struct fib_nh *nh = &res.fi->fib_nh[ret];

		if (nh->nh_dev == dev) {
			dev_match = true;
			break;
		}
	}
#else
	if (FIB_RES_DEV(res) == dev)
		dev_match = true;
#endif
	if (dev_match) {
		ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
		return ret;
	}
	if (no_addr)
		goto last_resort;
	if (rpf == 1)
		goto e_rpf;
	/* loose mode: retry the lookup restricted to the arrival device */
	fl4.flowi4_oif = dev->ifindex;

	ret = 0;
	if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
		if (res.type == RTN_UNICAST)
			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
	}
	return ret;

last_resort:
	/* no usable route back to the source: accept only when the
	 * reverse-path filter is off */
	if (rpf)
		goto e_rpf;
	*itag = 0;
	return 0;

e_inval:
	return -EINVAL;
e_rpf:
	return -EXDEV;
}
374
/* Ignore rp_filter for packets protected by IPsec.
 * Fast path: when no reverse-path filtering, no tclassid users and no
 * redirect-sensitive configuration applies, accept immediately with a
 * zero tag; otherwise defer to __fib_validate_source().
 */
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
			u8 tos, int oif, struct net_device *dev,
			struct in_device *idev, u32 *itag)
{
	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);

	if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
	    IN_DEV_ACCEPT_LOCAL(idev) &&
	    (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
		*itag = 0;
		return 0;
	}
	return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
}
390
/* Pull the raw IPv4 address out of a sockaddr that holds a sockaddr_in. */
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;

	return sin->sin_addr.s_addr;
}
395
/* Append a 32-bit RTAX_* metric attribute at byte offset 'len' inside the
 * preallocated buffer 'mx' and return the new used length.  Caller must
 * guarantee the buffer has room for another nla_total_size(4) bytes.
 */
static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
{
	struct nlattr *nla;

	nla = (struct nlattr *) ((char *) mx + len);
	nla->nla_type = type;
	nla->nla_len = nla_attr_size(4);
	*(u32 *) nla_data(nla) = value;

	return len + nla_total_size(4);
}
407
/* Convert a legacy SIOCADDRT/SIOCDELRT rtentry (old "route" tool ABI)
 * into a struct fib_config.  On success cfg->fc_mx may point to a
 * kmalloc'ed metrics block -- the caller is responsible for kfree()ing
 * it (see ip_rt_ioctl()).
 */
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
				 struct fib_config *cfg)
{
	__be32 addr;
	int plen;

	memset(cfg, 0, sizeof(*cfg));
	cfg->fc_nlinfo.nl_net = net;

	if (rt->rt_dst.sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/*
	 * Check mask for validity:
	 * a) it must be contiguous.
	 * b) destination must have all host bits clear.
	 * c) if application forgot to set correct family (AF_INET),
	 *    reject request unless it is absolutely clear i.e.
	 *    both family and mask are zero.
	 */
	plen = 32;
	addr = sk_extract_addr(&rt->rt_dst);
	if (!(rt->rt_flags & RTF_HOST)) {
		__be32 mask = sk_extract_addr(&rt->rt_genmask);

		if (rt->rt_genmask.sa_family != AF_INET) {
			if (mask || rt->rt_genmask.sa_family)
				return -EAFNOSUPPORT;
		}

		if (bad_mask(mask, addr))
			return -EINVAL;

		plen = inet_mask_len(mask);
	}

	cfg->fc_dst_len = plen;
	cfg->fc_dst = addr;

	if (cmd != SIOCDELRT) {
		cfg->fc_nlflags = NLM_F_CREATE;
		cfg->fc_protocol = RTPROT_BOOT;
	}

	if (rt->rt_metric)
		/* legacy ioctl metric is netlink priority + 1 */
		cfg->fc_priority = rt->rt_metric - 1;

	if (rt->rt_flags & RTF_REJECT) {
		cfg->fc_scope = RT_SCOPE_HOST;
		cfg->fc_type = RTN_UNREACHABLE;
		return 0;
	}

	cfg->fc_scope = RT_SCOPE_NOWHERE;
	cfg->fc_type = RTN_UNICAST;

	if (rt->rt_dev) {
		char *colon;
		struct net_device *dev;
		char devname[IFNAMSIZ];

		if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
			return -EFAULT;

		devname[IFNAMSIZ-1] = 0;
		/* "dev:label" syntax selects an aliased address as prefsrc */
		colon = strchr(devname, ':');
		if (colon)
			*colon = 0;
		dev = __dev_get_by_name(net, devname);
		if (!dev)
			return -ENODEV;
		cfg->fc_oif = dev->ifindex;
		if (colon) {
			struct in_ifaddr *ifa;
			struct in_device *in_dev = __in_dev_get_rtnl(dev);
			if (!in_dev)
				return -ENODEV;
			*colon = ':';
			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
				if (strcmp(ifa->ifa_label, devname) == 0)
					break;
			if (!ifa)
				return -ENODEV;
			cfg->fc_prefsrc = ifa->ifa_local;
		}
	}

	addr = sk_extract_addr(&rt->rt_gateway);
	if (rt->rt_gateway.sa_family == AF_INET && addr) {
		cfg->fc_gw = addr;
		if (rt->rt_flags & RTF_GATEWAY &&
		    inet_addr_type(net, addr) == RTN_UNICAST)
			cfg->fc_scope = RT_SCOPE_UNIVERSE;
	}

	if (cmd == SIOCDELRT)
		return 0;

	if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
		return -EINVAL;

	if (cfg->fc_scope == RT_SCOPE_NOWHERE)
		cfg->fc_scope = RT_SCOPE_LINK;

	if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
		struct nlattr *mx;
		int len = 0;

		/* room for up to three u32 metric attributes */
		mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
		if (!mx)
			return -ENOMEM;

		/* NOTE(review): historical quirk -- RTF_MTU is exported as
		 * RTAX_ADVMSS (mtu - 40, presumably IP+TCP header bytes) */
		if (rt->rt_flags & RTF_MTU)
			len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);

		if (rt->rt_flags & RTF_WINDOW)
			len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);

		if (rt->rt_flags & RTF_IRTT)
			len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);

		cfg->fc_mx = mx;
		cfg->fc_mx_len = len;
	}

	return 0;
}
535
/*
 * Handle IP routing ioctl calls.
 * These are used to manipulate the routing tables
 * (SIOCADDRT / SIOCDELRT only; anything else is -EINVAL).
 * Requires CAP_NET_ADMIN in the netns' user namespace.
 */
int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib_config cfg;
	struct rtentry rt;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&rt, arg, sizeof(rt)))
			return -EFAULT;

		rtnl_lock();
		err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
		if (err == 0) {
			struct fib_table *tb;

			if (cmd == SIOCDELRT) {
				/* deletion must not create the table */
				tb = fib_get_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_delete(tb, &cfg);
				else
					err = -ESRCH;
			} else {
				tb = fib_new_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_insert(tb, &cfg);
				else
					err = -ENOBUFS;
			}

			/* allocated by rtentry_to_fib_config() */
			kfree(cfg.fc_mx);
		}
		rtnl_unlock();
		return err;
	}
	return -EINVAL;
}
582
/* Netlink attribute validation policy for IPv4 RTM_{NEW,DEL,GET}ROUTE
 * messages (includes the lwtunnel RTA_ENCAP/RTA_ENCAP_TYPE attributes).
 */
const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
	[RTA_DST]		= { .type = NLA_U32 },
	[RTA_SRC]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_GATEWAY]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_PREFSRC]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_FLOW]		= { .type = NLA_U32 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
};
597
/* Translate a validated RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * struct fib_config.  Pointer-valued fields (fc_mx, fc_mp, fc_encap)
 * reference attribute data inside the original message, so the nlmsg
 * must outlive the cfg.  Returns 0 or a negative errno.
 */
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
			     struct nlmsghdr *nlh, struct fib_config *cfg)
{
	struct nlattr *attr;
	int err, remaining;
	struct rtmsg *rtm;

	err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	memset(cfg, 0, sizeof(*cfg));

	rtm = nlmsg_data(nlh);
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_tos = rtm->rtm_tos;
	cfg->fc_table = rtm->rtm_table;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_scope = rtm->rtm_scope;
	cfg->fc_type = rtm->rtm_type;
	cfg->fc_flags = rtm->rtm_flags;
	cfg->fc_nlflags = nlh->nlmsg_flags;

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = net;

	if (cfg->fc_type > RTN_MAX) {
		err = -EINVAL;
		goto errout;
	}

	/* walk the attributes; RTA_TABLE overrides the 8-bit rtm_table */
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
		switch (nla_type(attr)) {
		case RTA_DST:
			cfg->fc_dst = nla_get_be32(attr);
			break;
		case RTA_OIF:
			cfg->fc_oif = nla_get_u32(attr);
			break;
		case RTA_GATEWAY:
			cfg->fc_gw = nla_get_be32(attr);
			break;
		case RTA_PRIORITY:
			cfg->fc_priority = nla_get_u32(attr);
			break;
		case RTA_PREFSRC:
			cfg->fc_prefsrc = nla_get_be32(attr);
			break;
		case RTA_METRICS:
			cfg->fc_mx = nla_data(attr);
			cfg->fc_mx_len = nla_len(attr);
			break;
		case RTA_MULTIPATH:
			cfg->fc_mp = nla_data(attr);
			cfg->fc_mp_len = nla_len(attr);
			break;
		case RTA_FLOW:
			cfg->fc_flow = nla_get_u32(attr);
			break;
		case RTA_TABLE:
			cfg->fc_table = nla_get_u32(attr);
			break;
		case RTA_ENCAP:
			/* lwtunnel encapsulation attributes, parsed later */
			cfg->fc_encap = attr;
			break;
		case RTA_ENCAP_TYPE:
			cfg->fc_encap_type = nla_get_u16(attr);
			break;
		}
	}

	return 0;
errout:
	return err;
}
674
675 static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
676 {
677 struct net *net = sock_net(skb->sk);
678 struct fib_config cfg;
679 struct fib_table *tb;
680 int err;
681
682 err = rtm_to_fib_config(net, skb, nlh, &cfg);
683 if (err < 0)
684 goto errout;
685
686 tb = fib_get_table(net, cfg.fc_table);
687 if (!tb) {
688 err = -ESRCH;
689 goto errout;
690 }
691
692 err = fib_table_delete(tb, &cfg);
693 errout:
694 return err;
695 }
696
697 static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
698 {
699 struct net *net = sock_net(skb->sk);
700 struct fib_config cfg;
701 struct fib_table *tb;
702 int err;
703
704 err = rtm_to_fib_config(net, skb, nlh, &cfg);
705 if (err < 0)
706 goto errout;
707
708 tb = fib_new_table(net, cfg.fc_table);
709 if (!tb) {
710 err = -ENOBUFS;
711 goto errout;
712 }
713
714 err = fib_table_insert(tb, &cfg);
715 errout:
716 return err;
717 }
718
/* RTM_GETROUTE dump callback.  Resumable cursor state lives in cb->args:
 * args[0] = hash bucket, args[1] = table index within the bucket; the
 * remaining args belong to fib_table_dump() and are reset whenever a new
 * table is started.  Requests with RTM_F_CLONED are skipped (no cached
 * routes to dump here).
 */
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct fib_table *tb;
	struct hlist_head *head;
	int dumped = 0;

	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
		return skb->len;

	s_h = cb->args[0];
	s_e = cb->args[1];

	rcu_read_lock();

	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				/* clear per-table cursor before a new table */
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (fib_table_dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	rcu_read_unlock();

	/* save cursor so the next dump call resumes where we stopped */
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}
761
/* Prepare and feed intra-kernel routing request.
 * Really, it should be netlink message, but :-( netlink
 * can be not configured, so that we feed it directly
 * to fib engine. It is legal, because all events occur
 * only when netlink is already locked.
 *
 * Inserts (RTM_NEWROUTE) or deletes (RTM_DELROUTE) a kernel-generated
 * route for the given interface address: unicast routes go to MAIN,
 * everything else (local/broadcast) to LOCAL.
 */
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
	struct net *net = dev_net(ifa->ifa_dev->dev);
	struct fib_table *tb;
	struct fib_config cfg = {
		.fc_protocol = RTPROT_KERNEL,
		.fc_type = type,
		.fc_dst = dst,
		.fc_dst_len = dst_len,
		.fc_prefsrc = ifa->ifa_local,
		.fc_oif = ifa->ifa_dev->dev->ifindex,
		.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
		.fc_nlinfo = {
			.nl_net = net,
		},
	};

	if (type == RTN_UNICAST)
		tb = fib_new_table(net, RT_TABLE_MAIN);
	else
		tb = fib_new_table(net, RT_TABLE_LOCAL);

	if (!tb)
		return;

	cfg.fc_table = tb->tb_id;

	/* local addresses are host scope, everything else link scope */
	if (type != RTN_LOCAL)
		cfg.fc_scope = RT_SCOPE_LINK;
	else
		cfg.fc_scope = RT_SCOPE_HOST;

	if (cmd == RTM_NEWROUTE)
		fib_table_insert(tb, &cfg);
	else
		fib_table_delete(tb, &cfg);
}
805
/* Install the kernel routes implied by a newly added interface address:
 * a /32 local route for the address itself, broadcast routes, and (for
 * primary addresses) the connected subnet route.  For a secondary
 * address the matching primary is used as the route's source address.
 */
void fib_add_ifaddr(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *prim = ifa;
	__be32 mask = ifa->ifa_mask;
	__be32 addr = ifa->ifa_local;
	__be32 prefix = ifa->ifa_address & mask;

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, prefix, mask);
		if (!prim) {
			pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
	}

	fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);

	/* subnet/broadcast routes only make sense while the device is up */
	if (!(dev->flags & IFF_UP))
		return;

	/* Add broadcast address, if it is explicitly assigned. */
	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);

	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
		/* loopback subnets stay local; everything else is unicast */
		fib_magic(RTM_NEWROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  prefix, ifa->ifa_prefixlen, prim);

		/* Add network specific broadcasts, when it takes a sense */
		if (ifa->ifa_prefixlen < 31) {
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
				  32, prim);
		}
	}
}
846
/* Delete primary or secondary address.
 * Optionally, on secondary address promotion consider the addresses
 * from subnet iprim as deleted, even if they are in device list.
 * In this case the secondary ifa can be in device list.
 *
 * The surviving addresses on the device are scanned so that only routes
 * no longer backed by any address (LOCAL_OK/BRD*_OK bits) are removed.
 */
void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *ifa1;
	struct in_ifaddr *prim = ifa, *prim1 = NULL;
	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
	__be32 any = ifa->ifa_address & ifa->ifa_mask;
#define LOCAL_OK	1
#define BRD_OK		2
#define BRD0_OK		4
#define BRD1_OK		8
	unsigned int ok = 0;
	int subnet = 0;		/* Primary network */
	int gone = 1;		/* Address is missing */
	int same_prefsrc = 0;	/* Another primary with same IP */

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
		if (!prim) {
			pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
		if (iprim && iprim != prim) {
			pr_warn("%s: bug: iprim != prim\n", __func__);
			return;
		}
	} else if (!ipv4_is_zeronet(any) &&
		   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
		/* a primary address going away takes its subnet route along */
		fib_magic(RTM_DELROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  any, ifa->ifa_prefixlen, prim);
		subnet = 1;
	}

	/* Deletion is more complicated than add.
	 * We should take care of not to delete too much :-)
	 *
	 * Scan address list to be sure that addresses are really gone.
	 */

	for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
		if (ifa1 == ifa) {
			/* promotion, keep the IP */
			gone = 0;
			continue;
		}
		/* Ignore IFAs from our subnet */
		if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, iprim))
			continue;

		/* Ignore ifa1 if it uses different primary IP (prefsrc) */
		if (ifa1->ifa_flags & IFA_F_SECONDARY) {
			/* Another address from our subnet? */
			if (ifa1->ifa_mask == prim->ifa_mask &&
			    inet_ifa_match(ifa1->ifa_address, prim))
				prim1 = prim;
			else {
				/* We reached the secondaries, so
				 * same_prefsrc should be determined.
				 */
				if (!same_prefsrc)
					continue;
				/* Search new prim1 if ifa1 is not
				 * using the current prim1
				 */
				if (!prim1 ||
				    ifa1->ifa_mask != prim1->ifa_mask ||
				    !inet_ifa_match(ifa1->ifa_address, prim1))
					prim1 = inet_ifa_byprefix(in_dev,
							ifa1->ifa_address,
							ifa1->ifa_mask);
				if (!prim1)
					continue;
				if (prim1->ifa_local != prim->ifa_local)
					continue;
			}
		} else {
			if (prim->ifa_local != ifa1->ifa_local)
				continue;
			prim1 = ifa1;
			if (prim != prim1)
				same_prefsrc = 1;
		}
		/* a surviving address still backs these routes */
		if (ifa->ifa_local == ifa1->ifa_local)
			ok |= LOCAL_OK;
		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
			ok |= BRD_OK;
		if (brd == ifa1->ifa_broadcast)
			ok |= BRD1_OK;
		if (any == ifa1->ifa_broadcast)
			ok |= BRD0_OK;
		/* primary has network specific broadcasts */
		if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
			__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
			__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;

			if (!ipv4_is_zeronet(any1)) {
				if (ifa->ifa_broadcast == brd1 ||
				    ifa->ifa_broadcast == any1)
					ok |= BRD_OK;
				if (brd == brd1 || brd == any1)
					ok |= BRD1_OK;
				if (any == brd1 || any == any1)
					ok |= BRD0_OK;
			}
		}
	}

	/* delete only the routes no surviving address still justifies */
	if (!(ok & BRD_OK))
		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
	if (subnet && ifa->ifa_prefixlen < 31) {
		if (!(ok & BRD1_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
		if (!(ok & BRD0_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
	}
	if (!(ok & LOCAL_OK)) {
		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);

		/* Check, that this local address finally disappeared. */
		if (gone &&
		    inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
			/* And the last, but not the least thing.
			 * We must flush stray FIB entries.
			 *
			 * First of all, we scan fib_info list searching
			 * for stray nexthop entries, then ignite fib_flush.
			 */
			if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
				fib_flush(dev_net(dev));
		}
	}
#undef LOCAL_OK
#undef BRD_OK
#undef BRD0_OK
#undef BRD1_OK
}
991
/* Perform a fib lookup on behalf of a NETLINK_FIB_LOOKUP client and fill
 * the result back into 'frn' (err, table id, prefixlen, nexthop index,
 * type, scope).  frn->err is -ENOENT when the requested table does not
 * exist.
 */
static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
{

	struct fib_result       res;
	struct flowi4           fl4 = {
		.flowi4_mark = frn->fl_mark,
		.daddr = frn->fl_addr,
		.flowi4_tos = frn->fl_tos,
		.flowi4_scope = frn->fl_scope,
	};
	struct fib_table *tb;

	rcu_read_lock();

	tb = fib_get_table(net, frn->tb_id_in);

	frn->err = -ENOENT;
	if (tb) {
		local_bh_disable();

		frn->tb_id = tb->tb_id;
		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);

		if (!frn->err) {
			frn->prefixlen = res.prefixlen;
			frn->nh_sel = res.nh_sel;
			frn->type = res.type;
			frn->scope = res.scope;
		}
		local_bh_enable();
	}

	rcu_read_unlock();
}
1026
/* NETLINK_FIB_LOOKUP input handler: validate the request, run the lookup
 * and unicast the (in-place modified) message back to the sender.  The
 * skb is cloned because the reply reuses the request's buffer.
 */
static void nl_fib_input(struct sk_buff *skb)
{
	struct net *net;
	struct fib_result_nl *frn;
	struct nlmsghdr *nlh;
	u32 portid;

	net = sock_net(skb->sk);
	nlh = nlmsg_hdr(skb);
	/* reject truncated or undersized requests */
	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
	    nlmsg_len(nlh) < sizeof(*frn))
		return;

	skb = netlink_skb_clone(skb, GFP_KERNEL);
	if (!skb)
		return;
	nlh = nlmsg_hdr(skb);

	frn = (struct fib_result_nl *) nlmsg_data(nlh);
	nl_fib_lookup(net, frn);

	portid = NETLINK_CB(skb).portid;      /* netlink portid */
	NETLINK_CB(skb).portid = 0;           /* from kernel */
	NETLINK_CB(skb).dst_group = 0;        /* unicast */
	netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
}
1053
1054 static int __net_init nl_fib_lookup_init(struct net *net)
1055 {
1056 struct sock *sk;
1057 struct netlink_kernel_cfg cfg = {
1058 .input = nl_fib_input,
1059 };
1060
1061 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
1062 if (!sk)
1063 return -EAFNOSUPPORT;
1064 net->ipv4.fibnl = sk;
1065 return 0;
1066 }
1067
/* Tear down the per-netns NETLINK_FIB_LOOKUP kernel socket. */
static void nl_fib_lookup_exit(struct net *net)
{
	netlink_kernel_release(net->ipv4.fibnl);
	net->ipv4.fibnl = NULL;
}
1073
/* Remove all IPv4 state tied to a device that is going down or away:
 * sync/flush fib entries, drop cached routes, tear down its ARP state.
 */
static void fib_disable_ip(struct net_device *dev, unsigned long event)
{
	struct net *net = dev_net(dev);

	if (fib_sync_down_dev(dev, event))
		fib_flush(net);
	rt_cache_flush(net);
	arp_ifdown(dev);
}
1081
/* inetaddr notifier: keep fib routes in sync when an IPv4 address is
 * added to (NETDEV_UP) or removed from (NETDEV_DOWN) an interface.
 */
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);

	switch (event) {
	case NETDEV_UP:
		fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev, RTNH_F_DEAD);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(dev_net(dev));
		break;
	case NETDEV_DOWN:
		fib_del_ifaddr(ifa, NULL);
		atomic_inc(&net->ipv4.dev_addr_genid);
		if (!ifa->ifa_dev->ifa_list) {
			/* Last address was deleted from this interface.
			 * Disable IP.
			 */
			fib_disable_ip(dev, event);
		} else {
			rt_cache_flush(dev_net(dev));
		}
		break;
	}
	return NOTIFY_DONE;
}
1112
/* netdevice notifier: react to interface lifecycle events -- readd
 * addresses on UP, tear down fib state on DOWN/UNREGISTER, track link
 * state on CHANGE, and flush the route cache on MTU changes.
 */
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev;
	struct net *net = dev_net(dev);
	unsigned int flags;

	if (event == NETDEV_UNREGISTER) {
		fib_disable_ip(dev, event);
		rt_flush_dev(dev);
		return NOTIFY_DONE;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		/* re-install routes for every address on the device */
		for_ifa(in_dev) {
			fib_add_ifaddr(ifa);
		} endfor_ifa(in_dev);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev, RTNH_F_DEAD);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(net);
		break;
	case NETDEV_DOWN:
		fib_disable_ip(dev, event);
		break;
	case NETDEV_CHANGE:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			fib_sync_up(dev, RTNH_F_LINKDOWN);
		else
			fib_sync_down_dev(dev, event);
		/* fall through */
	case NETDEV_CHANGEMTU:
		rt_cache_flush(net);
		break;
	}
	return NOTIFY_DONE;
}
1157
/* Notifier blocks registered in ip_fib_init(). */
static struct notifier_block fib_inetaddr_notifier = {
	.notifier_call = fib_inetaddr_event,
};

static struct notifier_block fib_netdev_notifier = {
	.notifier_call = fib_netdev_event,
};
1165
/* Per-netns fib setup: allocate the table hash and initialize the fib
 * rules / default tables.
 */
static int __net_init ip_fib_net_init(struct net *net)
{
	int err;
	size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;

	/* Avoid false sharing : Use at least a full cache line */
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv4.fib_table_hash)
		return -ENOMEM;

	err = fib4_rules_init(net);
	if (err < 0)
		goto fail;
	return 0;

fail:
	kfree(net->ipv4.fib_table_hash);
	return err;
}
1187
/* Per-netns fib teardown: drop the cached table pointers, flush and free
 * every table in the hash, then release rules and the hash itself.
 * Takes rtnl for the duration of the teardown.
 */
static void ip_fib_net_exit(struct net *net)
{
	unsigned int i;

	rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
	RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
	RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
	RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
		struct hlist_node *tmp;
		struct fib_table *tb;

		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
			hlist_del(&tb->tb_hlist);
			fib_table_flush(tb);
			fib_free_table(tb);
		}
	}

#ifdef CONFIG_IP_MULTIPLE_TABLES
	fib4_rules_exit(net);
#endif
	rtnl_unlock();
	kfree(net->ipv4.fib_table_hash);
}
1216
/* pernet init: bring up fib tables, the NETLINK_FIB_LOOKUP socket and
 * the /proc entries, unwinding in reverse order on failure.
 */
static int __net_init fib_net_init(struct net *net)
{
	int error;

#ifdef CONFIG_IP_ROUTE_CLASSID
	net->ipv4.fib_num_tclassid_users = 0;
#endif
	error = ip_fib_net_init(net);
	if (error < 0)
		goto out;
	error = nl_fib_lookup_init(net);
	if (error < 0)
		goto out_nlfl;
	error = fib_proc_init(net);
	if (error < 0)
		goto out_proc;
out:
	return error;

out_proc:
	nl_fib_lookup_exit(net);
out_nlfl:
	ip_fib_net_exit(net);
	goto out;
}
1242
/* pernet exit: mirror of fib_net_init(), torn down in reverse order. */
static void __net_exit fib_net_exit(struct net *net)
{
	fib_proc_exit(net);
	nl_fib_lookup_exit(net);
	ip_fib_net_exit(net);
}
1249
/* pernet ops registered in ip_fib_init(). */
static struct pernet_operations fib_net_ops = {
	.init = fib_net_init,
	.exit = fib_net_exit,
};
1254
/* Boot-time fib initialization: register the rtnetlink route handlers,
 * the pernet operations and the device/address notifiers, then set up
 * the trie allocator caches.
 */
void __init ip_fib_init(void)
{
	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);

	register_pernet_subsys(&fib_net_ops);
	register_netdevice_notifier(&fib_netdev_notifier);
	register_inetaddr_notifier(&fib_inetaddr_notifier);

	fib_trie_init();
}
This page took 0.057225 seconds and 5 git commands to generate.