net/netfilter/ipvs/ip_vs_core.c
1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71
72 static int ip_vs_net_id __read_mostly;
73 /* netns cnt used for uniqueness */
74 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
75
76 /* ID used in ICMP lookups */
77 #define icmp_id(icmph) (((icmph)->un).echo.id)
78 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
79
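/* Map a protocol number to a printable name. Unknown protocols are
 * formatted into a static buffer, so the result is only meant for
 * debug and log output.
 */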
80 const char *ip_vs_proto_name(unsigned int proto)
81 {
82 static char buf[20];
83
84 switch (proto) {
85 case IPPROTO_IP:
86 return "IP";
87 case IPPROTO_UDP:
88 return "UDP";
89 case IPPROTO_TCP:
90 return "TCP";
91 case IPPROTO_SCTP:
92 return "SCTP";
93 case IPPROTO_ICMP:
94 return "ICMP";
95 #ifdef CONFIG_IP_VS_IPV6
96 case IPPROTO_ICMPV6:
97 return "ICMPv6";
98 #endif
99 default:
100 sprintf(buf, "IP_%u", proto);
101 return buf;
102 }
103 }
104
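/* Initialize every row of a hash table as an empty list head. */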
105 void ip_vs_init_hash_table(struct list_head *table, int rows)
106 {
107 while (--rows >= 0)
108 INIT_LIST_HEAD(&table[rows]);
109 }
110
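/* Account an incoming packet: update the per-CPU packet and byte
 * counters of the destination, its service and the netns totals,
 * but only while the real server is still available.
 */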
111 static inline void
112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
113 {
114 struct ip_vs_dest *dest = cp->dest;
115 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s;
119 struct ip_vs_service *svc;
120
121 s = this_cpu_ptr(dest->stats.cpustats);
122 u64_stats_update_begin(&s->syncp);
123 s->cnt.inpkts++;
124 s->cnt.inbytes += skb->len;
125 u64_stats_update_end(&s->syncp);
126
127 rcu_read_lock();
128 svc = rcu_dereference(dest->svc);
129 s = this_cpu_ptr(svc->stats.cpustats);
130 u64_stats_update_begin(&s->syncp);
131 s->cnt.inpkts++;
132 s->cnt.inbytes += skb->len;
133 u64_stats_update_end(&s->syncp);
134 rcu_read_unlock();
135
136 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
137 u64_stats_update_begin(&s->syncp);
138 s->cnt.inpkts++;
139 s->cnt.inbytes += skb->len;
140 u64_stats_update_end(&s->syncp);
141 }
142 }
143
144
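/* Account an outgoing packet in the same three per-CPU counter sets
 * as ip_vs_in_stats().
 */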
145 static inline void
146 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
147 {
148 struct ip_vs_dest *dest = cp->dest;
149 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
150
151 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
152 struct ip_vs_cpu_stats *s;
153 struct ip_vs_service *svc;
154
155 s = this_cpu_ptr(dest->stats.cpustats);
156 u64_stats_update_begin(&s->syncp);
157 s->cnt.outpkts++;
158 s->cnt.outbytes += skb->len;
159 u64_stats_update_end(&s->syncp);
160
161 rcu_read_lock();
162 svc = rcu_dereference(dest->svc);
163 s = this_cpu_ptr(svc->stats.cpustats);
164 u64_stats_update_begin(&s->syncp);
165 s->cnt.outpkts++;
166 s->cnt.outbytes += skb->len;
167 u64_stats_update_end(&s->syncp);
168 rcu_read_unlock();
169
170 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
171 u64_stats_update_begin(&s->syncp);
172 s->cnt.outpkts++;
173 s->cnt.outbytes += skb->len;
174 u64_stats_update_end(&s->syncp);
175 }
176 }
177
178
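/* Count a newly scheduled connection against the destination,
 * the service and the netns totals.
 */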
179 static inline void
180 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
181 {
182 struct netns_ipvs *ipvs = net_ipvs(svc->net);
183 struct ip_vs_cpu_stats *s;
184
185 s = this_cpu_ptr(cp->dest->stats.cpustats);
186 u64_stats_update_begin(&s->syncp);
187 s->cnt.conns++;
188 u64_stats_update_end(&s->syncp);
189
190 s = this_cpu_ptr(svc->stats.cpustats);
191 u64_stats_update_begin(&s->syncp);
192 s->cnt.conns++;
193 u64_stats_update_end(&s->syncp);
194
195 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
196 u64_stats_update_begin(&s->syncp);
197 s->cnt.conns++;
198 u64_stats_update_end(&s->syncp);
199 }
200
201
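/* Let the protocol handler update the connection state for a packet
 * seen in the given direction.
 */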
202 static inline void
203 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
204 const struct sk_buff *skb,
205 struct ip_vs_proto_data *pd)
206 {
207 if (likely(pd->pp->state_transition))
208 pd->pp->state_transition(cp, direction, skb, pd);
209 }
210
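/* Fill the template lookup parameters and, if the service uses a
 * persistence engine, let it extract its protocol-specific data
 * (e.g. the SIP Call-ID) from the packet.
 */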
211 static inline int
212 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
213 struct sk_buff *skb, int protocol,
214 const union nf_inet_addr *caddr, __be16 cport,
215 const union nf_inet_addr *vaddr, __be16 vport,
216 struct ip_vs_conn_param *p)
217 {
218 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
219 vport, p);
220 p->pe = rcu_dereference(svc->pe);
221 if (p->pe && p->pe->fill_param)
222 return p->pe->fill_param(p, skb);
223
224 return 0;
225 }
226
227 /*
228 * IPVS persistent scheduling function
229 * It creates a connection entry according to its template if one exists,
230 * or selects a server and creates a connection entry plus a template.
231 * Locking: we are svc user (svc->refcnt), so we hold all dests too
232 * Protocols supported: TCP, UDP
233 */
234 static struct ip_vs_conn *
235 ip_vs_sched_persist(struct ip_vs_service *svc,
236 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
237 int *ignored, struct ip_vs_iphdr *iph)
238 {
239 struct ip_vs_conn *cp = NULL;
240 struct ip_vs_dest *dest;
241 struct ip_vs_conn *ct;
242 __be16 dport = 0; /* destination port to forward */
243 unsigned int flags;
244 struct ip_vs_conn_param param;
245 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
246 union nf_inet_addr snet; /* source network of the client,
247 after masking */
248
249 /* Mask saddr with the netmask to adjust template granularity */
250 #ifdef CONFIG_IP_VS_IPV6
251 if (svc->af == AF_INET6)
252 ipv6_addr_prefix(&snet.in6, &iph->saddr.in6,
253 (__force __u32) svc->netmask);
254 else
255 #endif
256 snet.ip = iph->saddr.ip & svc->netmask;
257
258 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
259 "mnet %s\n",
260 IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
261 IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
262 IP_VS_DBG_ADDR(svc->af, &snet));
263
264 /*
265 * FTP is a complicated protocol: it uses a control connection and
266 * separate data connections. For active FTP, the FTP server initiates
267 * the data connection to the client, usually from source port 20. For
268 * passive FTP, the FTP server tells the client which port it is
269 * listening on, and the client opens the data connection. In tunneling
270 * or direct routing mode, the load balancer only sees the
271 * client-to-server half of the connection, so the data port number is
272 * unknown to it. Therefore, a conn template like
273 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
274 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
275 * is created for other persistent services.
276 */
277 {
278 int protocol = iph->protocol;
279 const union nf_inet_addr *vaddr = &iph->daddr;
280 __be16 vport = 0;
281
282 if (dst_port == svc->port) {
283 /* non-FTP template:
284 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
285 * FTP template:
286 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
287 */
288 if (svc->port != FTPPORT)
289 vport = dst_port;
290 } else {
291 /* Note: persistent fwmark-based services and
292 * persistent port zero service are handled here.
293 * fwmark template:
294 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
295 * port zero template:
296 * <protocol,caddr,0,vaddr,0,daddr,0>
297 */
298 if (svc->fwmark) {
299 protocol = IPPROTO_IP;
300 vaddr = &fwmark;
301 }
302 }
303 /* return *ignored = -1 so NF_DROP can be used */
304 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
305 vaddr, vport, &param) < 0) {
306 *ignored = -1;
307 return NULL;
308 }
309 }
310
311 /* Check if a template already exists */
312 ct = ip_vs_ct_in_get(&param);
313 if (!ct || !ip_vs_check_template(ct)) {
314 struct ip_vs_scheduler *sched;
315
316 /*
317 * No template found or the dest of the connection
318 * template is not available.
319 * return *ignored=0 i.e. ICMP and NF_DROP
320 */
321 sched = rcu_dereference(svc->scheduler);
322 dest = sched->schedule(svc, skb, iph);
323 if (!dest) {
324 IP_VS_DBG(1, "p-schedule: no dest found.\n");
325 kfree(param.pe_data);
326 *ignored = 0;
327 return NULL;
328 }
329
330 if (dst_port == svc->port && svc->port != FTPPORT)
331 dport = dest->port;
332
333 /* Create a template
334 * This adds param.pe_data to the template,
335 * and thus param.pe_data will be destroyed
336 * when the template expires */
337 ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
338 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
339 if (ct == NULL) {
340 kfree(param.pe_data);
341 *ignored = -1;
342 return NULL;
343 }
344
345 ct->timeout = svc->timeout;
346 } else {
347 /* set destination with the found template */
348 dest = ct->dest;
349 kfree(param.pe_data);
350 }
351
352 dport = dst_port;
353 if (dport == svc->port && dest->port)
354 dport = dest->port;
355
356 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
357 && iph->protocol == IPPROTO_UDP) ?
358 IP_VS_CONN_F_ONE_PACKET : 0;
359
360 /*
361 * Create a new connection according to the template
362 */
363 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
364 src_port, &iph->daddr, dst_port, &param);
365
366 cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
367 skb->mark);
368 if (cp == NULL) {
369 ip_vs_conn_put(ct);
370 *ignored = -1;
371 return NULL;
372 }
373
374 /*
375 * Add its control
376 */
377 ip_vs_control_add(cp, ct);
378 ip_vs_conn_put(ct);
379
380 ip_vs_conn_stats(cp, svc);
381 return cp;
382 }
383
384
385 /*
386 * IPVS main scheduling function
387 * It selects a server according to the virtual service, and
388 * creates a connection entry.
389 * Protocols supported: TCP, UDP
390 *
391 * Usage of *ignored
392 *
393 * 1 : protocol tried to schedule (e.g. on SYN), found svc but the
394 * svc/scheduler decides that this packet should be accepted with
395 * NF_ACCEPT because it must not be scheduled.
396 *
397 * 0 : scheduler cannot find a destination, so try bypass or
398 * return ICMP and then NF_DROP (ip_vs_leave).
399 *
400 * -1 : scheduler tried to schedule but a fatal error occurred, e.g.
401 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
402 * failure such as missing Call-ID, ENOMEM on skb_linearize
403 * or pe_data. In this case we should return NF_DROP without
404 * any attempts to send ICMP with ip_vs_leave.
405 */
406 struct ip_vs_conn *
407 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
408 struct ip_vs_proto_data *pd, int *ignored,
409 struct ip_vs_iphdr *iph)
410 {
411 struct ip_vs_protocol *pp = pd->pp;
412 struct ip_vs_conn *cp = NULL;
413 struct ip_vs_scheduler *sched;
414 struct ip_vs_dest *dest;
415 __be16 _ports[2], *pptr;
416 unsigned int flags;
417
418 *ignored = 1;
419 /*
420 * For IPv6 fragments, only the first fragment reaches this point.
421 */
422 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
423 if (pptr == NULL)
424 return NULL;
425
426 /*
427 * FTPDATA needs this check when using local real server.
428 * Never schedule Active FTPDATA connections from real server.
429 * For LVS-NAT they must already be created. For other methods
430 * with persistence the connection is created on SYN+ACK.
431 */
432 if (pptr[0] == FTPDATA) {
433 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
434 "Not scheduling FTPDATA");
435 return NULL;
436 }
437
438 /*
439 * Do not schedule replies from local real server.
440 */
441 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
442 (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
443 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
444 "Not scheduling reply for existing connection");
445 __ip_vs_conn_put(cp);
446 return NULL;
447 }
448
449 /*
450 * Persistent service
451 */
452 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
453 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
454 iph);
455
456 *ignored = 0;
457
458 /*
459 * Non-persistent service
460 */
461 if (!svc->fwmark && pptr[1] != svc->port) {
462 if (!svc->port)
463 pr_err("Schedule: port zero only supported "
464 "in persistent services, "
465 "check your ipvs configuration\n");
466 return NULL;
467 }
468
469 sched = rcu_dereference(svc->scheduler);
470 dest = sched->schedule(svc, skb, iph);
471 if (dest == NULL) {
472 IP_VS_DBG(1, "Schedule: no dest found.\n");
473 return NULL;
474 }
475
476 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
477 && iph->protocol == IPPROTO_UDP) ?
478 IP_VS_CONN_F_ONE_PACKET : 0;
479
480 /*
481 * Create a connection entry.
482 */
483 {
484 struct ip_vs_conn_param p;
485
486 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
487 &iph->saddr, pptr[0], &iph->daddr,
488 pptr[1], &p);
489 cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
490 dest->port ? dest->port : pptr[1],
491 flags, dest, skb->mark);
492 if (!cp) {
493 *ignored = -1;
494 return NULL;
495 }
496 }
497
498 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
499 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
500 ip_vs_fwd_tag(cp),
501 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
502 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
503 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
504 cp->flags, atomic_read(&cp->refcnt));
505
506 ip_vs_conn_stats(cp, svc);
507 return cp;
508 }
509
510
511 /*
512 * Pass or drop the packet.
513 * Called by ip_vs_in, when the virtual service is available but
514 * no destination is available for a new connection.
515 */
516 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
517 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
518 {
519 __be16 _ports[2], *pptr;
520 #ifdef CONFIG_SYSCTL
521 struct net *net;
522 struct netns_ipvs *ipvs;
523 int unicast;
524 #endif
525
526 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
527 if (pptr == NULL) {
528 return NF_DROP;
529 }
530
531 #ifdef CONFIG_SYSCTL
532 net = skb_net(skb);
533
534 #ifdef CONFIG_IP_VS_IPV6
535 if (svc->af == AF_INET6)
536 unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
537 else
538 #endif
539 unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);
540
541 /* if it is a fwmark-based service, the cache_bypass sysctl is
542 enabled and the destination is a non-local unicast address,
543 then create a cache_bypass connection entry */
544 ipvs = net_ipvs(net);
545 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
546 int ret;
547 struct ip_vs_conn *cp;
548 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
549 iph->protocol == IPPROTO_UDP) ?
550 IP_VS_CONN_F_ONE_PACKET : 0;
551 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
552
553 /* create a new connection entry */
554 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
555 {
556 struct ip_vs_conn_param p;
557 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
558 &iph->saddr, pptr[0],
559 &iph->daddr, pptr[1], &p);
560 cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
561 IP_VS_CONN_F_BYPASS | flags,
562 NULL, skb->mark);
563 if (!cp)
564 return NF_DROP;
565 }
566
567 /* statistics */
568 ip_vs_in_stats(cp, skb);
569
570 /* set state */
571 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
572
573 /* transmit the first SYN packet */
574 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
575 /* do not touch skb anymore */
576
577 atomic_inc(&cp->in_pkts);
578 ip_vs_conn_put(cp);
579 return ret;
580 }
581 #endif
582
583 /*
584 * When a virtual FTP service is present, packets destined
585 * for other services on the VIP may get here (except services
586 * listed in the IPVS table). Pass them along, because it is
587 * not IPVS's job to decide whether to drop them.
588 */
589 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT))
590 return NF_ACCEPT;
591
592 /*
593 * Notify the client that the destination is unreachable, and
594 * release the socket buffer.
595 * Since we are at the IP layer, no TCP socket actually
596 * exists and a TCP RST cannot be sent; instead, ICMP_PORT_UNREACH
597 * is sent here regardless of whether the packet is TCP or UDP. --WZ
598 */
599 #ifdef CONFIG_IP_VS_IPV6
600 if (svc->af == AF_INET6) {
601 if (!skb->dev) {
602 struct net *net_ = dev_net(skb_dst(skb)->dev);
603
604 skb->dev = net_->loopback_dev;
605 }
606 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
607 } else
608 #endif
609 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
610
611 return NF_DROP;
612 }
613
614 #ifdef CONFIG_SYSCTL
615
616 static int sysctl_snat_reroute(struct sk_buff *skb)
617 {
618 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
619 return ipvs->sysctl_snat_reroute;
620 }
621
622 static int sysctl_nat_icmp_send(struct net *net)
623 {
624 struct netns_ipvs *ipvs = net_ipvs(net);
625 return ipvs->sysctl_nat_icmp_send;
626 }
627
628 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
629 {
630 return ipvs->sysctl_expire_nodest_conn;
631 }
632
633 #else
634
635 static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
636 static int sysctl_nat_icmp_send(struct net *net) { return 0; }
637 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
638
639 #endif
640
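/* Compute and fold the checksum over the packet data starting at
 * the given offset.
 */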
641 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
642 {
643 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
644 }
645
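/* Map the netfilter hook number to the matching defragmentation user id. */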
646 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
647 {
648 if (NF_INET_LOCAL_IN == hooknum)
649 return IP_DEFRAG_VS_IN;
650 if (NF_INET_FORWARD == hooknum)
651 return IP_DEFRAG_VS_FWD;
652 return IP_DEFRAG_VS_OUT;
653 }
654
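/* Reassemble IPv4 fragments and refresh the IP header checksum.
 * A non-zero return means the skb was consumed by the defragmenter
 * and the caller must return NF_STOLEN.
 */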
655 static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
656 {
657 int err;
658
659 local_bh_disable();
660 err = ip_defrag(skb, user);
661 local_bh_enable();
662 if (!err)
663 ip_send_check(ip_hdr(skb));
664
665 return err;
666 }
667
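/* Re-route an SNAT-ed reply when the snat_reroute sysctl is enabled.
 * Replies to local clients (LOCAL_IN) are left alone. Returns
 * non-zero if re-routing fails.
 */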
668 static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
669 unsigned int hooknum)
670 {
671 if (!sysctl_snat_reroute(skb))
672 return 0;
673 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
674 if (NF_INET_LOCAL_IN == hooknum)
675 return 0;
676 #ifdef CONFIG_IP_VS_IPV6
677 if (af == AF_INET6) {
678 struct dst_entry *dst = skb_dst(skb);
679
680 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
681 ip6_route_me_harder(skb) != 0)
682 return 1;
683 } else
684 #endif
685 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
686 ip_route_me_harder(skb, RTN_LOCAL) != 0)
687 return 1;
688
689 return 0;
690 }
691
692 /*
693 * Packet has been made sufficiently writable in caller
694 * - inout: 1=in->out, 0=out->in
695 */
696 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
697 struct ip_vs_conn *cp, int inout)
698 {
699 struct iphdr *iph = ip_hdr(skb);
700 unsigned int icmp_offset = iph->ihl*4;
701 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
702 icmp_offset);
703 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
704
705 if (inout) {
706 iph->saddr = cp->vaddr.ip;
707 ip_send_check(iph);
708 ciph->daddr = cp->vaddr.ip;
709 ip_send_check(ciph);
710 } else {
711 iph->daddr = cp->daddr.ip;
712 ip_send_check(iph);
713 ciph->saddr = cp->daddr.ip;
714 ip_send_check(ciph);
715 }
716
717 /* the TCP/UDP/SCTP port */
718 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
719 IPPROTO_SCTP == ciph->protocol) {
720 __be16 *ports = (void *)ciph + ciph->ihl*4;
721
722 if (inout)
723 ports[1] = cp->vport;
724 else
725 ports[0] = cp->dport;
726 }
727
728 /* And finally the ICMP checksum */
729 icmph->checksum = 0;
730 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
731 skb->ip_summed = CHECKSUM_UNNECESSARY;
732
733 if (inout)
734 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
735 "Forwarding altered outgoing ICMP");
736 else
737 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
738 "Forwarding altered incoming ICMP");
739 }
740
741 #ifdef CONFIG_IP_VS_IPV6
742 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
743 struct ip_vs_conn *cp, int inout)
744 {
745 struct ipv6hdr *iph = ipv6_hdr(skb);
746 unsigned int icmp_offset = 0;
747 unsigned int offs = 0; /* header offset*/
748 int protocol;
749 struct icmp6hdr *icmph;
750 struct ipv6hdr *ciph;
751 unsigned short fragoffs;
752
753 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
754 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
755 offs = icmp_offset + sizeof(struct icmp6hdr);
756 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
757
758 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
759
760 if (inout) {
761 iph->saddr = cp->vaddr.in6;
762 ciph->daddr = cp->vaddr.in6;
763 } else {
764 iph->daddr = cp->daddr.in6;
765 ciph->saddr = cp->daddr.in6;
766 }
767
768 /* the TCP/UDP/SCTP port */
769 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
770 IPPROTO_SCTP == protocol)) {
771 __be16 *ports = (void *)(skb_network_header(skb) + offs);
772
773 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
774 ntohs(inout ? ports[1] : ports[0]),
775 ntohs(inout ? cp->vport : cp->dport));
776 if (inout)
777 ports[1] = cp->vport;
778 else
779 ports[0] = cp->dport;
780 }
781
782 /* And finally the ICMP checksum */
783 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
784 skb->len - icmp_offset,
785 IPPROTO_ICMPV6, 0);
786 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
787 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
788 skb->ip_summed = CHECKSUM_PARTIAL;
789
790 if (inout)
791 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
792 (void *)ciph - (void *)iph,
793 "Forwarding altered outgoing ICMPv6");
794 else
795 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
796 (void *)ciph - (void *)iph,
797 "Forwarding altered incoming ICMPv6");
798 }
799 #endif
800
801 /* Handle relevant response ICMP messages - forward to the right
802 * destination host.
803 */
804 static int handle_response_icmp(int af, struct sk_buff *skb,
805 union nf_inet_addr *snet,
806 __u8 protocol, struct ip_vs_conn *cp,
807 struct ip_vs_protocol *pp,
808 unsigned int offset, unsigned int ihl,
809 unsigned int hooknum)
810 {
811 unsigned int verdict = NF_DROP;
812
813 if (IP_VS_FWD_METHOD(cp) != 0) {
814 pr_err("shouldn't reach here, because the box is on the "
815 "half connection in the tun/dr module.\n");
816 }
817
818 /* Ensure the checksum is correct */
819 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
820 /* Failed checksum! */
821 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
822 IP_VS_DBG_ADDR(af, snet));
823 goto out;
824 }
825
826 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
827 IPPROTO_SCTP == protocol)
828 offset += 2 * sizeof(__u16);
829 if (!skb_make_writable(skb, offset))
830 goto out;
831
832 #ifdef CONFIG_IP_VS_IPV6
833 if (af == AF_INET6)
834 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
835 else
836 #endif
837 ip_vs_nat_icmp(skb, pp, cp, 1);
838
839 if (ip_vs_route_me_harder(af, skb, hooknum))
840 goto out;
841
842 /* do the statistics and put it back */
843 ip_vs_out_stats(cp, skb);
844
845 skb->ipvs_property = 1;
846 if (!(cp->flags & IP_VS_CONN_F_NFCT))
847 ip_vs_notrack(skb);
848 else
849 ip_vs_update_conntrack(skb, cp, 0);
850 verdict = NF_ACCEPT;
851
852 out:
853 __ip_vs_conn_put(cp);
854
855 return verdict;
856 }
857
858 /*
859 * Handle ICMP messages in the inside-to-outside direction (outgoing).
860 * Find any that might be relevant, check against existing connections.
861 * Currently handles error types - unreachable, quench, ttl exceeded.
862 */
863 static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
864 unsigned int hooknum)
865 {
866 struct iphdr *iph;
867 struct icmphdr _icmph, *ic;
868 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
869 struct ip_vs_iphdr ciph;
870 struct ip_vs_conn *cp;
871 struct ip_vs_protocol *pp;
872 unsigned int offset, ihl;
873 union nf_inet_addr snet;
874
875 *related = 1;
876
877 /* reassemble IP fragments */
878 if (ip_is_fragment(ip_hdr(skb))) {
879 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
880 return NF_STOLEN;
881 }
882
883 iph = ip_hdr(skb);
884 offset = ihl = iph->ihl * 4;
885 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
886 if (ic == NULL)
887 return NF_DROP;
888
889 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
890 ic->type, ntohs(icmp_id(ic)),
891 &iph->saddr, &iph->daddr);
892
893 /*
894 * Work through seeing if this is for us.
895 * These checks are supposed to be in an order that means easy
896 * things are checked first to speed up processing.... however
897 * this means that some packets will manage to get a long way
898 * down this stack and then be rejected, but that's life.
899 */
900 if ((ic->type != ICMP_DEST_UNREACH) &&
901 (ic->type != ICMP_SOURCE_QUENCH) &&
902 (ic->type != ICMP_TIME_EXCEEDED)) {
903 *related = 0;
904 return NF_ACCEPT;
905 }
906
907 /* Now find the contained IP header */
908 offset += sizeof(_icmph);
909 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
910 if (cih == NULL)
911 return NF_ACCEPT; /* The packet looks wrong, ignore */
912
913 pp = ip_vs_proto_get(cih->protocol);
914 if (!pp)
915 return NF_ACCEPT;
916
917 /* Is the embedded protocol header present? */
918 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
919 pp->dont_defrag))
920 return NF_ACCEPT;
921
922 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
923 "Checking outgoing ICMP for");
924
925 ip_vs_fill_ip4hdr(cih, &ciph);
926 ciph.len += offset;
927 /* The embedded headers contain source and dest in reverse order */
928 cp = pp->conn_out_get(AF_INET, skb, &ciph, 1);
929 if (!cp)
930 return NF_ACCEPT;
931
932 snet.ip = iph->saddr;
933 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
934 pp, ciph.len, ihl, hooknum);
935 }
936
937 #ifdef CONFIG_IP_VS_IPV6
938 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
939 unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
940 {
941 struct icmp6hdr _icmph, *ic;
942 struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */
943 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
944 struct ip_vs_conn *cp;
945 struct ip_vs_protocol *pp;
946 union nf_inet_addr snet;
947 unsigned int writable;
948
949 *related = 1;
950 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
951 if (ic == NULL)
952 return NF_DROP;
953
954 /*
955 * Work through seeing if this is for us.
956 * These checks are supposed to be in an order that means easy
957 * things are checked first to speed up processing.... however
958 * this means that some packets will manage to get a long way
959 * down this stack and then be rejected, but that's life.
960 */
961 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
962 *related = 0;
963 return NF_ACCEPT;
964 }
965 /* A fragment header before the ICMP header tells us that
966 * this is not an error message, since error messages cannot be fragmented.
967 */
968 if (ipvsh->flags & IP6_FH_F_FRAG)
969 return NF_DROP;
970
971 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
972 ic->icmp6_type, ntohs(icmpv6_id(ic)),
973 &ipvsh->saddr, &ipvsh->daddr);
974
975 /* Now find the contained IP header */
976 ciph.len = ipvsh->len + sizeof(_icmph);
977 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
978 if (ip6h == NULL)
979 return NF_ACCEPT; /* The packet looks wrong, ignore */
980 ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */
981 ciph.daddr.in6 = ip6h->daddr;
982 /* skip possible IPv6 exthdrs of contained IPv6 packet */
983 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
984 if (ciph.protocol < 0)
985 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
986
987 pp = ip_vs_proto_get(ciph.protocol);
988 if (!pp)
989 return NF_ACCEPT;
990
991 /* The embedded headers contain source and dest in reverse order */
992 cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1);
993 if (!cp)
994 return NF_ACCEPT;
995
996 snet.in6 = ciph.saddr.in6;
997 writable = ciph.len;
998 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
999 pp, writable, sizeof(struct ipv6hdr),
1000 hooknum);
1001 }
1002 #endif
1003
1004 /*
1005 * Check if the SCTP chunk is an ABORT chunk
1006 */
1007 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
1008 {
1009 sctp_chunkhdr_t *sch, schunk;
1010 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
1011 sizeof(schunk), &schunk);
1012 if (sch == NULL)
1013 return 0;
1014 if (sch->type == SCTP_CID_ABORT)
1015 return 1;
1016 return 0;
1017 }
1018
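/* Return non-zero if the TCP header at nh_len has the RST flag set. */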
1019 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1020 {
1021 struct tcphdr _tcph, *th;
1022
1023 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1024 if (th == NULL)
1025 return 0;
1026 return th->rst;
1027 }
1028
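/* A packet starts a new connection if it is a TCP SYN or an SCTP INIT chunk. */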
1029 static inline bool is_new_conn(const struct sk_buff *skb,
1030 struct ip_vs_iphdr *iph)
1031 {
1032 switch (iph->protocol) {
1033 case IPPROTO_TCP: {
1034 struct tcphdr _tcph, *th;
1035
1036 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1037 if (th == NULL)
1038 return false;
1039 return th->syn;
1040 }
1041 case IPPROTO_SCTP: {
1042 sctp_chunkhdr_t *sch, schunk;
1043
1044 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1045 sizeof(schunk), &schunk);
1046 if (sch == NULL)
1047 return false;
1048 return sch->type == SCTP_CID_INIT;
1049 }
1050 default:
1051 return false;
1052 }
1053 }
1054
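/* An existing connection may be expired in favor of a new one only if
 * it is not controlled (FTP DATA or persistence) and is in a terminal
 * state: TCP TIME_WAIT, TCP FIN_WAIT with no output seen (when bit 1 of
 * conn_reuse_mode is set), or SCTP CLOSED.
 */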
1055 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
1056 int conn_reuse_mode)
1057 {
1058 /* Controlled (FTP DATA or persistence)? */
1059 if (cp->control)
1060 return false;
1061
1062 switch (cp->protocol) {
1063 case IPPROTO_TCP:
1064 return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
1065 ((conn_reuse_mode & 2) &&
1066 (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
1067 (cp->flags & IP_VS_CONN_F_NOOUTPUT));
1068 case IPPROTO_SCTP:
1069 return cp->state == IP_VS_SCTP_S_CLOSED;
1070 default:
1071 return false;
1072 }
1073 }
1074
1075 /* Handle response packets: rewrite addresses and send away...
1076 */
1077 static unsigned int
1078 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1079 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1080 unsigned int hooknum)
1081 {
1082 struct ip_vs_protocol *pp = pd->pp;
1083
1084 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
1085
1086 if (!skb_make_writable(skb, iph->len))
1087 goto drop;
1088
1089 /* mangle the packet */
1090 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1091 goto drop;
1092
1093 #ifdef CONFIG_IP_VS_IPV6
1094 if (af == AF_INET6)
1095 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1096 else
1097 #endif
1098 {
1099 ip_hdr(skb)->saddr = cp->vaddr.ip;
1100 ip_send_check(ip_hdr(skb));
1101 }
1102
1103 /*
1104 * nf_iterate does not expect change in the skb->dst->dev.
1105 * It looks like it is not fatal to enable this code for hooks
1106 * where our handlers are at the end of the chain list and
1107 * when all next handlers use skb->dst->dev and not outdev.
1108 * It will definitely route properly the inout NAT traffic
1109 * when multiple paths are used.
1110 */
1111
1112 /* For policy routing, packets originating from this
1113 * machine itself may be routed differently to packets
1114 * passing through. We want this packet to be routed as
1115 * if it came from this machine itself. So re-compute
1116 * the routing information.
1117 */
1118 if (ip_vs_route_me_harder(af, skb, hooknum))
1119 goto drop;
1120
1121 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
1122
1123 ip_vs_out_stats(cp, skb);
1124 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1125 skb->ipvs_property = 1;
1126 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1127 ip_vs_notrack(skb);
1128 else
1129 ip_vs_update_conntrack(skb, cp, 0);
1130 ip_vs_conn_put(cp);
1131
1132 LeaveFunction(11);
1133 return NF_ACCEPT;
1134
1135 drop:
1136 ip_vs_conn_put(cp);
1137 kfree_skb(skb);
1138 LeaveFunction(11);
1139 return NF_STOLEN;
1140 }
1141
1142 /*
1143 * Check if outgoing packet belongs to the established ip_vs_conn.
1144 */
1145 static unsigned int
1146 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1147 {
1148 struct net *net = NULL;
1149 struct ip_vs_iphdr iph;
1150 struct ip_vs_protocol *pp;
1151 struct ip_vs_proto_data *pd;
1152 struct ip_vs_conn *cp;
1153
1154 EnterFunction(11);
1155
1156 /* Already marked as IPVS request or reply? */
1157 if (skb->ipvs_property)
1158 return NF_ACCEPT;
1159
1160 /* Bad... Do not break raw sockets */
1161 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1162 af == AF_INET)) {
1163 struct sock *sk = skb->sk;
1164 struct inet_sock *inet = inet_sk(skb->sk);
1165
1166 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1167 return NF_ACCEPT;
1168 }
1169
1170 if (unlikely(!skb_dst(skb)))
1171 return NF_ACCEPT;
1172
1173 net = skb_net(skb);
1174 if (!net_ipvs(net)->enable)
1175 return NF_ACCEPT;
1176
1177 ip_vs_fill_iph_skb(af, skb, &iph);
1178 #ifdef CONFIG_IP_VS_IPV6
1179 if (af == AF_INET6) {
1180 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1181 int related;
1182 int verdict = ip_vs_out_icmp_v6(skb, &related,
1183 hooknum, &iph);
1184
1185 if (related)
1186 return verdict;
1187 }
1188 } else
1189 #endif
1190 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1191 int related;
1192 int verdict = ip_vs_out_icmp(skb, &related, hooknum);
1193
1194 if (related)
1195 return verdict;
1196 }
1197
1198 pd = ip_vs_proto_data_get(net, iph.protocol);
1199 if (unlikely(!pd))
1200 return NF_ACCEPT;
1201 pp = pd->pp;
1202
1203 /* reassemble IP fragments */
1204 #ifdef CONFIG_IP_VS_IPV6
1205 if (af == AF_INET)
1206 #endif
1207 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1208 if (ip_vs_gather_frags(skb,
1209 ip_vs_defrag_user(hooknum)))
1210 return NF_STOLEN;
1211
1212 ip_vs_fill_ip4hdr(skb_network_header(skb), &iph);
1213 }
1214
1215 /*
1216 * Check if the packet belongs to an existing entry
1217 */
1218 cp = pp->conn_out_get(af, skb, &iph, 0);
1219
1220 if (likely(cp))
1221 return handle_response(af, skb, pd, cp, &iph, hooknum);
1222 if (sysctl_nat_icmp_send(net) &&
1223 (pp->protocol == IPPROTO_TCP ||
1224 pp->protocol == IPPROTO_UDP ||
1225 pp->protocol == IPPROTO_SCTP)) {
1226 __be16 _ports[2], *pptr;
1227
1228 pptr = frag_safe_skb_hp(skb, iph.len,
1229 sizeof(_ports), _ports, &iph);
1230 if (pptr == NULL)
1231 return NF_ACCEPT; /* Not for me */
1232 if (ip_vs_has_real_service(net, af, iph.protocol, &iph.saddr,
1233 pptr[0])) {
1234 /*
1235 * Notify the real server that there is no
1236 * existing entry, unless the packet is a
1237 * TCP RST or an SCTP ABORT.
1238 */
1239 if ((iph.protocol != IPPROTO_TCP &&
1240 iph.protocol != IPPROTO_SCTP)
1241 || ((iph.protocol == IPPROTO_TCP
1242 && !is_tcp_reset(skb, iph.len))
1243 || (iph.protocol == IPPROTO_SCTP
1244 && !is_sctp_abort(skb,
1245 iph.len)))) {
1246 #ifdef CONFIG_IP_VS_IPV6
1247 if (af == AF_INET6) {
1248 if (!skb->dev)
1249 skb->dev = net->loopback_dev;
1250 icmpv6_send(skb,
1251 ICMPV6_DEST_UNREACH,
1252 ICMPV6_PORT_UNREACH,
1253 0);
1254 } else
1255 #endif
1256 icmp_send(skb,
1257 ICMP_DEST_UNREACH,
1258 ICMP_PORT_UNREACH, 0);
1259 return NF_DROP;
1260 }
1261 }
1262 }
1263 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1264 "ip_vs_out: packet continues traversal as normal");
1265 return NF_ACCEPT;
1266 }
1267
1268 /*
1269 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1270 * used only for VS/NAT.
1271 * Check if packet is reply for established ip_vs_conn.
1272 */
1273 static unsigned int
1274 ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1275 const struct nf_hook_state *state)
1276 {
1277 return ip_vs_out(ops->hooknum, skb, AF_INET);
1278 }
1279
1280 /*
1281 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1282 * Check if packet is reply for established ip_vs_conn.
1283 */
1284 static unsigned int
1285 ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1286 const struct nf_hook_state *state)
1287 {
1288 return ip_vs_out(ops->hooknum, skb, AF_INET);
1289 }
1290
1291 #ifdef CONFIG_IP_VS_IPV6
1292
1293 /*
1294 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1295 * used only for VS/NAT.
1296 * Check if packet is reply for established ip_vs_conn.
1297 */
1298 static unsigned int
1299 ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1300 const struct nf_hook_state *state)
1301 {
1302 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1303 }
1304
1305 /*
1306 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1307 * Check if packet is reply for established ip_vs_conn.
1308 */
1309 static unsigned int
1310 ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1311 const struct nf_hook_state *state)
1312 {
1313 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1314 }
1315
1316 #endif
1317
1318 /*
1319 * Handle ICMP messages in the outside-to-inside direction (incoming).
1320 * Find any that might be relevant, check against existing connections,
1321 * forward to the right destination host if relevant.
1322 * Currently handles error types - unreachable, quench, ttl exceeded.
1323 */
1324 static int
1325 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1326 {
1327 struct net *net = NULL;
1328 struct iphdr *iph;
1329 struct icmphdr _icmph, *ic;
1330 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1331 struct ip_vs_iphdr ciph;
1332 struct ip_vs_conn *cp;
1333 struct ip_vs_protocol *pp;
1334 struct ip_vs_proto_data *pd;
1335 unsigned int offset, offset2, ihl, verdict;
1336 bool ipip;
1337
1338 *related = 1;
1339
1340 /* reassemble IP fragments */
1341 if (ip_is_fragment(ip_hdr(skb))) {
1342 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
1343 return NF_STOLEN;
1344 }
1345
1346 iph = ip_hdr(skb);
1347 offset = ihl = iph->ihl * 4;
1348 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1349 if (ic == NULL)
1350 return NF_DROP;
1351
1352 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1353 ic->type, ntohs(icmp_id(ic)),
1354 &iph->saddr, &iph->daddr);
1355
1356 /*
1357 * Work through seeing if this is for us.
1358 * These checks are supposed to be in an order that means easy
1359 * things are checked first to speed up processing.... however
1360 * this means that some packets will manage to get a long way
1361 * down this stack and then be rejected, but that's life.
1362 */
1363 if ((ic->type != ICMP_DEST_UNREACH) &&
1364 (ic->type != ICMP_SOURCE_QUENCH) &&
1365 (ic->type != ICMP_TIME_EXCEEDED)) {
1366 *related = 0;
1367 return NF_ACCEPT;
1368 }
1369
1370 /* Now find the contained IP header */
1371 offset += sizeof(_icmph);
1372 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1373 if (cih == NULL)
1374 return NF_ACCEPT; /* The packet looks wrong, ignore */
1375
1376 net = skb_net(skb);
1377
1378 /* Special case for errors for IPIP packets */
1379 ipip = false;
1380 if (cih->protocol == IPPROTO_IPIP) {
1381 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1382 return NF_ACCEPT;
1383 /* Error for our IPIP must arrive at LOCAL_IN */
1384 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1385 return NF_ACCEPT;
1386 offset += cih->ihl * 4;
1387 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1388 if (cih == NULL)
1389 return NF_ACCEPT; /* The packet looks wrong, ignore */
1390 ipip = true;
1391 }
1392
1393 pd = ip_vs_proto_data_get(net, cih->protocol);
1394 if (!pd)
1395 return NF_ACCEPT;
1396 pp = pd->pp;
1397
1398 /* Is the embedded protocol header present? */
1399 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1400 pp->dont_defrag))
1401 return NF_ACCEPT;
1402
1403 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1404 "Checking incoming ICMP for");
1405
1406 offset2 = offset;
1407 ip_vs_fill_ip4hdr(cih, &ciph);
1408 ciph.len += offset;
1409 offset = ciph.len;
1410 /* The embedded headers contain source and dest in reverse order.
1411 * For IPIP this is error for request, not for reply.
1412 */
1413 cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1);
1414 if (!cp)
1415 return NF_ACCEPT;
1416
1417 verdict = NF_DROP;
1418
1419 /* Ensure the checksum is correct */
1420 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1421 /* Failed checksum! */
1422 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1423 &iph->saddr);
1424 goto out;
1425 }
1426
1427 if (ipip) {
1428 __be32 info = ic->un.gateway;
1429 __u8 type = ic->type;
1430 __u8 code = ic->code;
1431
1432 /* Update the MTU */
1433 if (ic->type == ICMP_DEST_UNREACH &&
1434 ic->code == ICMP_FRAG_NEEDED) {
1435 struct ip_vs_dest *dest = cp->dest;
1436 u32 mtu = ntohs(ic->un.frag.mtu);
1437 __be16 frag_off = cih->frag_off;
1438
1439 /* Strip outer IP and ICMP, go to IPIP header */
1440 if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1441 goto ignore_ipip;
1442 offset2 -= ihl + sizeof(_icmph);
1443 skb_reset_network_header(skb);
1444 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1445 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1446 ipv4_update_pmtu(skb, dev_net(skb->dev),
1447 mtu, 0, 0, 0, 0);
1448 /* Client uses PMTUD? */
1449 if (!(frag_off & htons(IP_DF)))
1450 goto ignore_ipip;
1451 /* Prefer the resulting PMTU */
1452 if (dest) {
1453 struct ip_vs_dest_dst *dest_dst;
1454
1455 rcu_read_lock();
1456 dest_dst = rcu_dereference(dest->dest_dst);
1457 if (dest_dst)
1458 mtu = dst_mtu(dest_dst->dst_cache);
1459 rcu_read_unlock();
1460 }
1461 if (mtu > 68 + sizeof(struct iphdr))
1462 mtu -= sizeof(struct iphdr);
1463 info = htonl(mtu);
1464 }
1465 /* Strip outer IP, ICMP and IPIP, go to IP header of
1466 * original request.
1467 */
1468 if (pskb_pull(skb, offset2) == NULL)
1469 goto ignore_ipip;
1470 skb_reset_network_header(skb);
1471 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1472 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1473 type, code, ntohl(info));
1474 icmp_send(skb, type, code, info);
1475 /* ICMP can be shorter but account it anyway */
1476 ip_vs_out_stats(cp, skb);
1477
1478 ignore_ipip:
1479 consume_skb(skb);
1480 verdict = NF_STOLEN;
1481 goto out;
1482 }
1483
1484 /* do the statistics and put it back */
1485 ip_vs_in_stats(cp, skb);
1486 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1487 IPPROTO_SCTP == cih->protocol)
1488 offset += 2 * sizeof(__u16);
1489 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1490
1491 out:
1492 __ip_vs_conn_put(cp);
1493
1494 return verdict;
1495 }
1496
1497 #ifdef CONFIG_IP_VS_IPV6
1498 static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
1499 unsigned int hooknum, struct ip_vs_iphdr *iph)
1500 {
1501 struct net *net = NULL;
1502 struct ipv6hdr _ip6h, *ip6h;
1503 struct icmp6hdr _icmph, *ic;
1504 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1505 struct ip_vs_conn *cp;
1506 struct ip_vs_protocol *pp;
1507 struct ip_vs_proto_data *pd;
1508 unsigned int offs_ciph, writable, verdict;
1509
1510 *related = 1;
1511
1512 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1513 if (ic == NULL)
1514 return NF_DROP;
1515
1516 /*
1517 * Work through seeing if this is for us.
1518 * These checks are supposed to be in an order that means easy
1519 * things are checked first to speed up processing.... however
1520 * this means that some packets will manage to get a long way
1521 * down this stack and then be rejected, but that's life.
1522 */
1523 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1524 *related = 0;
1525 return NF_ACCEPT;
1526 }
1527 /* A fragment header before the ICMP header tells us that
1528 * this is not an error message, since error messages cannot be fragmented.
1529 */
1530 if (iph->flags & IP6_FH_F_FRAG)
1531 return NF_DROP;
1532
1533 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1534 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1535 &iph->saddr, &iph->daddr);
1536
1537 /* Now find the contained IP header */
1538 ciph.len = iph->len + sizeof(_icmph);
1539 offs_ciph = ciph.len; /* Save ip header offset */
1540 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
1541 if (ip6h == NULL)
1542 return NF_ACCEPT; /* The packet looks wrong, ignore */
1543 ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */
1544 ciph.daddr.in6 = ip6h->daddr;
1545 /* skip possible IPv6 exthdrs of contained IPv6 packet */
1546 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
1547 if (ciph.protocol < 0)
1548 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
1549
1550 net = skb_net(skb);
1551 pd = ip_vs_proto_data_get(net, ciph.protocol);
1552 if (!pd)
1553 return NF_ACCEPT;
1554 pp = pd->pp;
1555
1556 /* Cannot handle fragmented embedded protocol */
1557 if (ciph.fragoffs)
1558 return NF_ACCEPT;
1559
1560 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph,
1561 "Checking incoming ICMPv6 for");
1562
1563 /* The embedded headers contain source and dest in reverse order
1564 * if not from localhost
1565 */
1566 cp = pp->conn_in_get(AF_INET6, skb, &ciph,
1567 (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1);
1568
1569 if (!cp)
1570 return NF_ACCEPT;
1571 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1572 if ((hooknum == NF_INET_LOCAL_OUT) &&
1573 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1574 __ip_vs_conn_put(cp);
1575 return NF_ACCEPT;
1576 }
1577
1578 /* do the statistics and put it back */
1579 ip_vs_in_stats(cp, skb);
1580
1581 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1582 writable = ciph.len;
1583 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1584 IPPROTO_SCTP == ciph.protocol)
1585 writable += 2 * sizeof(__u16); /* Also mangle ports */
1586
1587 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph);
1588
1589 __ip_vs_conn_put(cp);
1590
1591 return verdict;
1592 }
1593 #endif
1594
1595
1596 /*
1597 * Check if it's for virtual services, look it up,
1598 * and send it on its way...
1599 */
1600 static unsigned int
1601 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1602 {
1603 struct net *net;
1604 struct ip_vs_iphdr iph;
1605 struct ip_vs_protocol *pp;
1606 struct ip_vs_proto_data *pd;
1607 struct ip_vs_conn *cp;
1608 int ret, pkts;
1609 struct netns_ipvs *ipvs;
1610 int conn_reuse_mode;
1611
1612 /* Already marked as IPVS request or reply? */
1613 if (skb->ipvs_property)
1614 return NF_ACCEPT;
1615
1616 /*
1617 * Big tappo:
1618 * - remote client: only PACKET_HOST
1619 * - route: used for struct net when skb->dev is unset
1620 */
1621 if (unlikely((skb->pkt_type != PACKET_HOST &&
1622 hooknum != NF_INET_LOCAL_OUT) ||
1623 !skb_dst(skb))) {
1624 ip_vs_fill_iph_skb(af, skb, &iph);
1625 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1626 " ignored in hook %u\n",
1627 skb->pkt_type, iph.protocol,
1628 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1629 return NF_ACCEPT;
1630 }
1631 /* ipvs enabled in this netns ? */
1632 net = skb_net(skb);
1633 ipvs = net_ipvs(net);
1634 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1635 return NF_ACCEPT;
1636
1637 ip_vs_fill_iph_skb(af, skb, &iph);
1638
1639 /* Bad... Do not break raw sockets */
1640 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1641 af == AF_INET)) {
1642 struct sock *sk = skb->sk;
1643 struct inet_sock *inet = inet_sk(skb->sk);
1644
1645 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1646 return NF_ACCEPT;
1647 }
1648
1649 #ifdef CONFIG_IP_VS_IPV6
1650 if (af == AF_INET6) {
1651 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1652 int related;
1653 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
1654 &iph);
1655
1656 if (related)
1657 return verdict;
1658 }
1659 } else
1660 #endif
1661 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1662 int related;
1663 int verdict = ip_vs_in_icmp(skb, &related, hooknum);
1664
1665 if (related)
1666 return verdict;
1667 }
1668
1669 /* Protocol supported? */
1670 pd = ip_vs_proto_data_get(net, iph.protocol);
1671 if (unlikely(!pd))
1672 return NF_ACCEPT;
1673 pp = pd->pp;
1674 /*
1675 * Check if the packet belongs to an existing connection entry
1676 */
1677 cp = pp->conn_in_get(af, skb, &iph, 0);
1678
1679 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
1680 if (conn_reuse_mode && !iph.fragoffs &&
1681 is_new_conn(skb, &iph) && cp &&
1682 ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
1683 unlikely(!atomic_read(&cp->dest->weight))) ||
1684 unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
1685 if (!atomic_read(&cp->n_control))
1686 ip_vs_conn_expire_now(cp);
1687 __ip_vs_conn_put(cp);
1688 cp = NULL;
1689 }
1690
1691 if (unlikely(!cp) && !iph.fragoffs) {
1692 /* No (second) fragments need to enter here, as the fragment zero
1693 * replayed by nf_defrag_ipv6 will already have created the cp
1694 */
1695 int v;
1696
1697 /* Schedule and create new connection entry into &cp */
1698 if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph))
1699 return v;
1700 }
1701
1702 if (unlikely(!cp)) {
1703 /* sorry, all this trouble for a no-hit :) */
1704 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1705 "ip_vs_in: packet continues traversal as normal");
1706 if (iph.fragoffs) {
1707 /* Fragment that couldn't be mapped to a conn entry
1708 * is missing module nf_defrag_ipv6
1709 */
1710 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1711 IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment");
1712 }
1713 return NF_ACCEPT;
1714 }
1715
1716 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1717 /* Check the server status */
1718 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1719 /* the destination server is not available */
1720
1721 if (sysctl_expire_nodest_conn(ipvs)) {
1722 /* try to expire the connection immediately */
1723 ip_vs_conn_expire_now(cp);
1724 }
1725 /* don't restart its timer, and silently
1726 drop the packet. */
1727 __ip_vs_conn_put(cp);
1728 return NF_DROP;
1729 }
1730
1731 ip_vs_in_stats(cp, skb);
1732 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1733 if (cp->packet_xmit)
1734 ret = cp->packet_xmit(skb, cp, pp, &iph);
1735 /* do not touch skb anymore */
1736 else {
1737 IP_VS_DBG_RL("warning: packet_xmit is null");
1738 ret = NF_ACCEPT;
1739 }
1740
1741 /* Increase its packet counter and check whether it needs
1742 * to be synchronized
1743 *
1744 * Sync the connection if it is about to close, to
1745 * encourage the standby servers to update the connection's timeout
1746 *
1747 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1748 */
1749
1750 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1751 pkts = sysctl_sync_threshold(ipvs);
1752 else
1753 pkts = atomic_add_return(1, &cp->in_pkts);
1754
1755 if (ipvs->sync_state & IP_VS_STATE_MASTER)
1756 ip_vs_sync_conn(net, cp, pkts);
1757
1758 ip_vs_conn_put(cp);
1759 return ret;
1760 }
1761
1762 /*
1763 * AF_INET handler in NF_INET_LOCAL_IN chain
1764 * Schedule and forward packets from remote clients
1765 */
1766 static unsigned int
1767 ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1768 const struct nf_hook_state *state)
1769 {
1770 return ip_vs_in(ops->hooknum, skb, AF_INET);
1771 }
1772
1773 /*
1774 * AF_INET handler in NF_INET_LOCAL_OUT chain
1775 * Schedule and forward packets from local clients
1776 */
1777 static unsigned int
1778 ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1779 const struct nf_hook_state *state)
1780 {
1781 return ip_vs_in(ops->hooknum, skb, AF_INET);
1782 }
1783
1784 #ifdef CONFIG_IP_VS_IPV6
1785
1786 /*
1787 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1788 * Schedule and forward packets from remote clients
1789 */
1790 static unsigned int
1791 ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1792 const struct nf_hook_state *state)
1793 {
1794 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1795 }
1796
1797 /*
1798 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
1799 * Schedule and forward packets from local clients
1800 */
1801 static unsigned int
1802 ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1803 const struct nf_hook_state *state)
1804 {
1805 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1806 }
1807
1808 #endif
1809
1810
1811 /*
1812 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1813 * related packets destined for 0.0.0.0/0.
1814 * When a fwmark-based virtual service is used, such as a transparent
1815 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1816 * but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1817 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1818 * and send them to ip_vs_in_icmp.
1819 */
1820 static unsigned int
1821 ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
1822 const struct nf_hook_state *state)
1823 {
1824 int r;
1825 struct net *net;
1826 struct netns_ipvs *ipvs;
1827
1828 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1829 return NF_ACCEPT;
1830
1831 /* ipvs enabled in this netns ? */
1832 net = skb_net(skb);
1833 ipvs = net_ipvs(net);
1834 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1835 return NF_ACCEPT;
1836
1837 return ip_vs_in_icmp(skb, &r, ops->hooknum);
1838 }
1839
1840 #ifdef CONFIG_IP_VS_IPV6
1841 static unsigned int
1842 ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1843 const struct nf_hook_state *state)
1844 {
1845 int r;
1846 struct net *net;
1847 struct netns_ipvs *ipvs;
1848 struct ip_vs_iphdr iphdr;
1849
1850 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
1851 if (iphdr.protocol != IPPROTO_ICMPV6)
1852 return NF_ACCEPT;
1853
1854 /* ipvs enabled in this netns ? */
1855 net = skb_net(skb);
1856 ipvs = net_ipvs(net);
1857 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1858 return NF_ACCEPT;
1859
1860 return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
1861 }
1862 #endif
1863
1864
1865 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1866 /* After packet filtering, change source only for VS/NAT */
1867 {
1868 .hook = ip_vs_reply4,
1869 .owner = THIS_MODULE,
1870 .pf = NFPROTO_IPV4,
1871 .hooknum = NF_INET_LOCAL_IN,
1872 .priority = NF_IP_PRI_NAT_SRC - 2,
1873 },
1874 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1875 * or VS/NAT(change destination), so that filtering rules can be
1876 * applied to IPVS. */
1877 {
1878 .hook = ip_vs_remote_request4,
1879 .owner = THIS_MODULE,
1880 .pf = NFPROTO_IPV4,
1881 .hooknum = NF_INET_LOCAL_IN,
1882 .priority = NF_IP_PRI_NAT_SRC - 1,
1883 },
1884 /* Before ip_vs_in, change source only for VS/NAT */
1885 {
1886 .hook = ip_vs_local_reply4,
1887 .owner = THIS_MODULE,
1888 .pf = NFPROTO_IPV4,
1889 .hooknum = NF_INET_LOCAL_OUT,
1890 .priority = NF_IP_PRI_NAT_DST + 1,
1891 },
1892 /* After mangle, schedule and forward local requests */
1893 {
1894 .hook = ip_vs_local_request4,
1895 .owner = THIS_MODULE,
1896 .pf = NFPROTO_IPV4,
1897 .hooknum = NF_INET_LOCAL_OUT,
1898 .priority = NF_IP_PRI_NAT_DST + 2,
1899 },
1900 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1901 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1902 {
1903 .hook = ip_vs_forward_icmp,
1904 .owner = THIS_MODULE,
1905 .pf = NFPROTO_IPV4,
1906 .hooknum = NF_INET_FORWARD,
1907 .priority = 99,
1908 },
1909 /* After packet filtering, change source only for VS/NAT */
1910 {
1911 .hook = ip_vs_reply4,
1912 .owner = THIS_MODULE,
1913 .pf = NFPROTO_IPV4,
1914 .hooknum = NF_INET_FORWARD,
1915 .priority = 100,
1916 },
1917 #ifdef CONFIG_IP_VS_IPV6
1918 /* After packet filtering, change source only for VS/NAT */
1919 {
1920 .hook = ip_vs_reply6,
1921 .owner = THIS_MODULE,
1922 .pf = NFPROTO_IPV6,
1923 .hooknum = NF_INET_LOCAL_IN,
1924 .priority = NF_IP6_PRI_NAT_SRC - 2,
1925 },
1926 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1927 * or VS/NAT(change destination), so that filtering rules can be
1928 * applied to IPVS. */
1929 {
1930 .hook = ip_vs_remote_request6,
1931 .owner = THIS_MODULE,
1932 .pf = NFPROTO_IPV6,
1933 .hooknum = NF_INET_LOCAL_IN,
1934 .priority = NF_IP6_PRI_NAT_SRC - 1,
1935 },
1936 /* Before ip_vs_in, change source only for VS/NAT */
1937 {
1938 .hook = ip_vs_local_reply6,
1939 .owner = THIS_MODULE,
1940 .pf = NFPROTO_IPV6,
1941 .hooknum = NF_INET_LOCAL_OUT,
1942 .priority = NF_IP6_PRI_NAT_DST + 1,
1943 },
1944 /* After mangle, schedule and forward local requests */
1945 {
1946 .hook = ip_vs_local_request6,
1947 .owner = THIS_MODULE,
1948 .pf = NFPROTO_IPV6,
1949 .hooknum = NF_INET_LOCAL_OUT,
1950 .priority = NF_IP6_PRI_NAT_DST + 2,
1951 },
1952 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1953 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1954 {
1955 .hook = ip_vs_forward_icmp_v6,
1956 .owner = THIS_MODULE,
1957 .pf = NFPROTO_IPV6,
1958 .hooknum = NF_INET_FORWARD,
1959 .priority = 99,
1960 },
1961 /* After packet filtering, change source only for VS/NAT */
1962 {
1963 .hook = ip_vs_reply6,
1964 .owner = THIS_MODULE,
1965 .pf = NFPROTO_IPV6,
1966 .hooknum = NF_INET_FORWARD,
1967 .priority = 100,
1968 },
1969 #endif
1970 };
1971 /*
1972 * Initialize IP Virtual Server netns mem.
1973 */
1974 static int __net_init __ip_vs_init(struct net *net)
1975 {
1976 struct netns_ipvs *ipvs;
1977
1978 ipvs = net_generic(net, ip_vs_net_id);
1979 if (ipvs == NULL)
1980 return -ENOMEM;
1981
1982 /* Hold the beast until a service is registered */
1983 ipvs->enable = 0;
1984 ipvs->net = net;
1985 /* Counters used for creating unique names */
1986 ipvs->gen = atomic_read(&ipvs_netns_cnt);
1987 atomic_inc(&ipvs_netns_cnt);
1988 net->ipvs = ipvs;
1989
1990 if (ip_vs_estimator_net_init(net) < 0)
1991 goto estimator_fail;
1992
1993 if (ip_vs_control_net_init(net) < 0)
1994 goto control_fail;
1995
1996 if (ip_vs_protocol_net_init(net) < 0)
1997 goto protocol_fail;
1998
1999 if (ip_vs_app_net_init(net) < 0)
2000 goto app_fail;
2001
2002 if (ip_vs_conn_net_init(net) < 0)
2003 goto conn_fail;
2004
2005 if (ip_vs_sync_net_init(net) < 0)
2006 goto sync_fail;
2007
2008 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
2009 sizeof(struct netns_ipvs), ipvs->gen);
2010 return 0;
2011 /*
2012 * Error handling
2013 */
2014
2015 sync_fail:
2016 ip_vs_conn_net_cleanup(net);
2017 conn_fail:
2018 ip_vs_app_net_cleanup(net);
2019 app_fail:
2020 ip_vs_protocol_net_cleanup(net);
2021 protocol_fail:
2022 ip_vs_control_net_cleanup(net);
2023 control_fail:
2024 ip_vs_estimator_net_cleanup(net);
2025 estimator_fail:
2026 net->ipvs = NULL;
2027 return -ENOMEM;
2028 }
2029
2030 static void __net_exit __ip_vs_cleanup(struct net *net)
2031 {
2032 ip_vs_service_net_cleanup(net); /* ip_vs_flush() with locks */
2033 ip_vs_conn_net_cleanup(net);
2034 ip_vs_app_net_cleanup(net);
2035 ip_vs_protocol_net_cleanup(net);
2036 ip_vs_control_net_cleanup(net);
2037 ip_vs_estimator_net_cleanup(net);
2038 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
2039 net->ipvs = NULL;
2040 }
2041
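/* Netns exit on the device path: stop packet processing in this netns
 * before the sync threads are cleaned up.
 */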
2042 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2043 {
2044 EnterFunction(2);
2045 net_ipvs(net)->enable = 0; /* Disable packet reception */
2046 smp_wmb();
2047 ip_vs_sync_net_cleanup(net);
2048 LeaveFunction(2);
2049 }
2050
2051 static struct pernet_operations ipvs_core_ops = {
2052 .init = __ip_vs_init,
2053 .exit = __ip_vs_cleanup,
2054 .id = &ip_vs_net_id,
2055 .size = sizeof(struct netns_ipvs),
2056 };
2057
2058 static struct pernet_operations ipvs_core_dev_ops = {
2059 .exit = __ip_vs_dev_cleanup,
2060 };
2061
2062 /*
2063 * Initialize IP Virtual Server
2064 */
2065 static int __init ip_vs_init(void)
2066 {
2067 int ret;
2068
2069 ret = ip_vs_control_init();
2070 if (ret < 0) {
2071 pr_err("can't setup control.\n");
2072 goto exit;
2073 }
2074
2075 ip_vs_protocol_init();
2076
2077 ret = ip_vs_conn_init();
2078 if (ret < 0) {
2079 pr_err("can't setup connection table.\n");
2080 goto cleanup_protocol;
2081 }
2082
2083 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2084 if (ret < 0)
2085 goto cleanup_conn;
2086
2087 ret = register_pernet_device(&ipvs_core_dev_ops);
2088 if (ret < 0)
2089 goto cleanup_sub;
2090
2091 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2092 if (ret < 0) {
2093 pr_err("can't register hooks.\n");
2094 goto cleanup_dev;
2095 }
2096
2097 ret = ip_vs_register_nl_ioctl();
2098 if (ret < 0) {
2099 pr_err("can't register netlink/ioctl.\n");
2100 goto cleanup_hooks;
2101 }
2102
2103 pr_info("ipvs loaded.\n");
2104
2105 return ret;
2106
2107 cleanup_hooks:
2108 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2109 cleanup_dev:
2110 unregister_pernet_device(&ipvs_core_dev_ops);
2111 cleanup_sub:
2112 unregister_pernet_subsys(&ipvs_core_ops);
2113 cleanup_conn:
2114 ip_vs_conn_cleanup();
2115 cleanup_protocol:
2116 ip_vs_protocol_cleanup();
2117 ip_vs_control_cleanup();
2118 exit:
2119 return ret;
2120 }
2121
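/* Module exit: tear everything down in the reverse order of ip_vs_init(). */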
2122 static void __exit ip_vs_cleanup(void)
2123 {
2124 ip_vs_unregister_nl_ioctl();
2125 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2126 unregister_pernet_device(&ipvs_core_dev_ops);
2127 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2128 ip_vs_conn_cleanup();
2129 ip_vs_protocol_cleanup();
2130 ip_vs_control_cleanup();
2131 pr_info("ipvs unloaded.\n");
2132 }
2133
2134 module_init(ip_vs_init);
2135 module_exit(ip_vs_cleanup);
2136 MODULE_LICENSE("GPL");