net/netfilter/ipvs/ip_vs_core.c
1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71 EXPORT_SYMBOL(ip_vs_new_conn_out);
72
73 static int ip_vs_net_id __read_mostly;
74 /* netns cnt used for uniqueness */
75 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
76
77 /* ID used in ICMP lookups */
78 #define icmp_id(icmph) (((icmph)->un).echo.id)
79 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
80
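/* Return a printable name for a protocol number.  Unknown protocols fall
 * back to a shared static buffer ("IP_<proto>"), so that path is not
 * re-entrant.
 */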
81 const char *ip_vs_proto_name(unsigned int proto)
82 {
83 static char buf[20];
84
85 switch (proto) {
86 case IPPROTO_IP:
87 return "IP";
88 case IPPROTO_UDP:
89 return "UDP";
90 case IPPROTO_TCP:
91 return "TCP";
92 case IPPROTO_SCTP:
93 return "SCTP";
94 case IPPROTO_ICMP:
95 return "ICMP";
96 #ifdef CONFIG_IP_VS_IPV6
97 case IPPROTO_ICMPV6:
98 return "ICMPv6";
99 #endif
100 default:
101 sprintf(buf, "IP_%u", proto);
102 return buf;
103 }
104 }
105
106 void ip_vs_init_hash_table(struct list_head *table, int rows)
107 {
108 while (--rows >= 0)
109 INIT_LIST_HEAD(&table[rows]);
110 }
111
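/* Account an incoming packet: bump the per-CPU inpkts/inbytes counters of
 * the destination, its virtual service and the per-netns totals.
 */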
112 static inline void
113 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
114 {
115 struct ip_vs_dest *dest = cp->dest;
116 struct netns_ipvs *ipvs = cp->ipvs;
117
118 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
119 struct ip_vs_cpu_stats *s;
120 struct ip_vs_service *svc;
121
122 s = this_cpu_ptr(dest->stats.cpustats);
123 u64_stats_update_begin(&s->syncp);
124 s->cnt.inpkts++;
125 s->cnt.inbytes += skb->len;
126 u64_stats_update_end(&s->syncp);
127
128 rcu_read_lock();
129 svc = rcu_dereference(dest->svc);
130 s = this_cpu_ptr(svc->stats.cpustats);
131 u64_stats_update_begin(&s->syncp);
132 s->cnt.inpkts++;
133 s->cnt.inbytes += skb->len;
134 u64_stats_update_end(&s->syncp);
135 rcu_read_unlock();
136
137 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
138 u64_stats_update_begin(&s->syncp);
139 s->cnt.inpkts++;
140 s->cnt.inbytes += skb->len;
141 u64_stats_update_end(&s->syncp);
142 }
143 }
144
145
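/* Account an outgoing packet: same per-CPU accounting as above, but for
 * the outpkts/outbytes counters.
 */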
146 static inline void
147 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
148 {
149 struct ip_vs_dest *dest = cp->dest;
150 struct netns_ipvs *ipvs = cp->ipvs;
151
152 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
153 struct ip_vs_cpu_stats *s;
154 struct ip_vs_service *svc;
155
156 s = this_cpu_ptr(dest->stats.cpustats);
157 u64_stats_update_begin(&s->syncp);
158 s->cnt.outpkts++;
159 s->cnt.outbytes += skb->len;
160 u64_stats_update_end(&s->syncp);
161
162 rcu_read_lock();
163 svc = rcu_dereference(dest->svc);
164 s = this_cpu_ptr(svc->stats.cpustats);
165 u64_stats_update_begin(&s->syncp);
166 s->cnt.outpkts++;
167 s->cnt.outbytes += skb->len;
168 u64_stats_update_end(&s->syncp);
169 rcu_read_unlock();
170
171 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
172 u64_stats_update_begin(&s->syncp);
173 s->cnt.outpkts++;
174 s->cnt.outbytes += skb->len;
175 u64_stats_update_end(&s->syncp);
176 }
177 }
178
179
180 static inline void
181 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
182 {
183 struct netns_ipvs *ipvs = svc->ipvs;
184 struct ip_vs_cpu_stats *s;
185
186 s = this_cpu_ptr(cp->dest->stats.cpustats);
187 u64_stats_update_begin(&s->syncp);
188 s->cnt.conns++;
189 u64_stats_update_end(&s->syncp);
190
191 s = this_cpu_ptr(svc->stats.cpustats);
192 u64_stats_update_begin(&s->syncp);
193 s->cnt.conns++;
194 u64_stats_update_end(&s->syncp);
195
196 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
197 u64_stats_update_begin(&s->syncp);
198 s->cnt.conns++;
199 u64_stats_update_end(&s->syncp);
200 }
201
202
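/* Let the protocol handler drive the connection state machine, if it
 * provides a state_transition hook.
 */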
203 static inline void
204 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
205 const struct sk_buff *skb,
206 struct ip_vs_proto_data *pd)
207 {
208 if (likely(pd->pp->state_transition))
209 pd->pp->state_transition(cp, direction, skb, pd);
210 }
211
212 static inline int
213 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
214 struct sk_buff *skb, int protocol,
215 const union nf_inet_addr *caddr, __be16 cport,
216 const union nf_inet_addr *vaddr, __be16 vport,
217 struct ip_vs_conn_param *p)
218 {
219 ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
220 vport, p);
221 p->pe = rcu_dereference(svc->pe);
222 if (p->pe && p->pe->fill_param)
223 return p->pe->fill_param(p, skb);
224
225 return 0;
226 }
227
228 /*
229 * IPVS persistent scheduling function
230  *  It creates a connection entry according to its template if it exists,
231 * or selects a server and creates a connection entry plus a template.
232 * Locking: we are svc user (svc->refcnt), so we hold all dests too
233 * Protocols supported: TCP, UDP
234 */
235 static struct ip_vs_conn *
236 ip_vs_sched_persist(struct ip_vs_service *svc,
237 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
238 int *ignored, struct ip_vs_iphdr *iph)
239 {
240 struct ip_vs_conn *cp = NULL;
241 struct ip_vs_dest *dest;
242 struct ip_vs_conn *ct;
243 __be16 dport = 0; /* destination port to forward */
244 unsigned int flags;
245 struct ip_vs_conn_param param;
246 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
247 union nf_inet_addr snet; /* source network of the client,
248 after masking */
249 const union nf_inet_addr *src_addr, *dst_addr;
250
251 if (likely(!ip_vs_iph_inverse(iph))) {
252 src_addr = &iph->saddr;
253 dst_addr = &iph->daddr;
254 } else {
255 src_addr = &iph->daddr;
256 dst_addr = &iph->saddr;
257 }
258
259
260 /* Mask saddr with the netmask to adjust template granularity */
261 #ifdef CONFIG_IP_VS_IPV6
262 if (svc->af == AF_INET6)
263 ipv6_addr_prefix(&snet.in6, &src_addr->in6,
264 (__force __u32) svc->netmask);
265 else
266 #endif
267 snet.ip = src_addr->ip & svc->netmask;
268
269 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
270 "mnet %s\n",
271 IP_VS_DBG_ADDR(svc->af, src_addr), ntohs(src_port),
272 IP_VS_DBG_ADDR(svc->af, dst_addr), ntohs(dst_port),
273 IP_VS_DBG_ADDR(svc->af, &snet));
274
275 /*
276 * As far as we know, FTP is a very complicated network protocol, and
277  * it uses a control connection and data connections. For active FTP,
278  * the FTP server initiates the data connection to the client; its source
279  * port is often 20. For passive FTP, the FTP server tells the client the
280  * port that it passively listens on, and the client opens the data
281  * connection. In tunneling or direct routing mode, the load
282  * balancer is on the client-to-server half of the connection, so the
283  * port number is unknown to the load balancer. So, a conn template like
284 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
285 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
286 * is created for other persistent services.
287 */
288 {
289 int protocol = iph->protocol;
290 const union nf_inet_addr *vaddr = dst_addr;
291 __be16 vport = 0;
292
293 if (dst_port == svc->port) {
294 /* non-FTP template:
295 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
296 * FTP template:
297 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
298 */
299 if (svc->port != FTPPORT)
300 vport = dst_port;
301 } else {
302 /* Note: persistent fwmark-based services and
303 * persistent port zero service are handled here.
304 * fwmark template:
305 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
306 * port zero template:
307 * <protocol,caddr,0,vaddr,0,daddr,0>
308 */
309 if (svc->fwmark) {
310 protocol = IPPROTO_IP;
311 vaddr = &fwmark;
312 }
313 }
314 /* return *ignored = -1 so NF_DROP can be used */
315 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
316 vaddr, vport, &param) < 0) {
317 *ignored = -1;
318 return NULL;
319 }
320 }
321
322 /* Check if a template already exists */
323 ct = ip_vs_ct_in_get(&param);
324 if (!ct || !ip_vs_check_template(ct)) {
325 struct ip_vs_scheduler *sched;
326
327 /*
328 * No template found or the dest of the connection
329 * template is not available.
330 * return *ignored=0 i.e. ICMP and NF_DROP
331 */
332 sched = rcu_dereference(svc->scheduler);
333 if (sched) {
334 /* read svc->sched_data after svc->scheduler */
335 smp_rmb();
336 dest = sched->schedule(svc, skb, iph);
337 } else {
338 dest = NULL;
339 }
340 if (!dest) {
341 IP_VS_DBG(1, "p-schedule: no dest found.\n");
342 kfree(param.pe_data);
343 *ignored = 0;
344 return NULL;
345 }
346
347 if (dst_port == svc->port && svc->port != FTPPORT)
348 dport = dest->port;
349
350 /* Create a template
351 * This adds param.pe_data to the template,
352 * and thus param.pe_data will be destroyed
353 * when the template expires */
354 ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
355 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
356 if (ct == NULL) {
357 kfree(param.pe_data);
358 *ignored = -1;
359 return NULL;
360 }
361
362 ct->timeout = svc->timeout;
363 } else {
364 /* set destination with the found template */
365 dest = ct->dest;
366 kfree(param.pe_data);
367 }
368
369 dport = dst_port;
370 if (dport == svc->port && dest->port)
371 dport = dest->port;
372
373 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
374 && iph->protocol == IPPROTO_UDP) ?
375 IP_VS_CONN_F_ONE_PACKET : 0;
376
377 /*
378 * Create a new connection according to the template
379 */
380 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
381 src_port, dst_addr, dst_port, &param);
382
383 cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
384 skb->mark);
385 if (cp == NULL) {
386 ip_vs_conn_put(ct);
387 *ignored = -1;
388 return NULL;
389 }
390
391 /*
392 * Add its control
393 */
394 ip_vs_control_add(cp, ct);
395 ip_vs_conn_put(ct);
396
397 ip_vs_conn_stats(cp, svc);
398 return cp;
399 }
400
401
402 /*
403 * IPVS main scheduling function
404 * It selects a server according to the virtual service, and
405 * creates a connection entry.
406 * Protocols supported: TCP, UDP
407 *
408 * Usage of *ignored
409 *
410 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
411 * svc/scheduler decides that this packet should be accepted with
412 * NF_ACCEPT because it must not be scheduled.
413 *
414  * 0 :   scheduler cannot find a destination, so try bypass or
415 * return ICMP and then NF_DROP (ip_vs_leave).
416 *
417 * -1 : scheduler tried to schedule but fatal error occurred, eg.
418 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
419 * failure such as missing Call-ID, ENOMEM on skb_linearize
420 * or pe_data. In this case we should return NF_DROP without
421 * any attempts to send ICMP with ip_vs_leave.
422 */
423 struct ip_vs_conn *
424 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
425 struct ip_vs_proto_data *pd, int *ignored,
426 struct ip_vs_iphdr *iph)
427 {
428 struct ip_vs_protocol *pp = pd->pp;
429 struct ip_vs_conn *cp = NULL;
430 struct ip_vs_scheduler *sched;
431 struct ip_vs_dest *dest;
432 __be16 _ports[2], *pptr, cport, vport;
433 const void *caddr, *vaddr;
434 unsigned int flags;
435
436 *ignored = 1;
437 /*
438 * IPv6 frags, only the first hit here.
439 */
440 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
441 if (pptr == NULL)
442 return NULL;
443
444 if (likely(!ip_vs_iph_inverse(iph))) {
445 cport = pptr[0];
446 caddr = &iph->saddr;
447 vport = pptr[1];
448 vaddr = &iph->daddr;
449 } else {
450 cport = pptr[1];
451 caddr = &iph->daddr;
452 vport = pptr[0];
453 vaddr = &iph->saddr;
454 }
455
456 /*
457 * FTPDATA needs this check when using local real server.
458 * Never schedule Active FTPDATA connections from real server.
459 * For LVS-NAT they must be already created. For other methods
460 * with persistence the connection is created on SYN+ACK.
461 */
462 if (cport == FTPDATA) {
463 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
464 "Not scheduling FTPDATA");
465 return NULL;
466 }
467
468 /*
469 * Do not schedule replies from local real server.
470 */
471 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
472 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
473 cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
474 iph->hdr_flags ^= IP_VS_HDR_INVERSE;
475
476 if (cp) {
477 IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
478 "Not scheduling reply for existing"
479 " connection");
480 __ip_vs_conn_put(cp);
481 return NULL;
482 }
483 }
484
485 /*
486 * Persistent service
487 */
488 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
489 return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
490 iph);
491
492 *ignored = 0;
493
494 /*
495 * Non-persistent service
496 */
497 if (!svc->fwmark && vport != svc->port) {
498 if (!svc->port)
499 pr_err("Schedule: port zero only supported "
500 "in persistent services, "
501 "check your ipvs configuration\n");
502 return NULL;
503 }
504
505 sched = rcu_dereference(svc->scheduler);
506 if (sched) {
507 /* read svc->sched_data after svc->scheduler */
508 smp_rmb();
509 dest = sched->schedule(svc, skb, iph);
510 } else {
511 dest = NULL;
512 }
513 if (dest == NULL) {
514 IP_VS_DBG(1, "Schedule: no dest found.\n");
515 return NULL;
516 }
517
518 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
519 && iph->protocol == IPPROTO_UDP) ?
520 IP_VS_CONN_F_ONE_PACKET : 0;
521
522 /*
523 * Create a connection entry.
524 */
525 {
526 struct ip_vs_conn_param p;
527
528 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
529 caddr, cport, vaddr, vport, &p);
530 cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
531 dest->port ? dest->port : vport,
532 flags, dest, skb->mark);
533 if (!cp) {
534 *ignored = -1;
535 return NULL;
536 }
537 }
538
539 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
540 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
541 ip_vs_fwd_tag(cp),
542 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
543 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
544 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
545 cp->flags, atomic_read(&cp->refcnt));
546
547 ip_vs_conn_stats(cp, svc);
548 return cp;
549 }
550
551 static inline int ip_vs_addr_is_unicast(struct net *net, int af,
552 union nf_inet_addr *addr)
553 {
554 #ifdef CONFIG_IP_VS_IPV6
555 if (af == AF_INET6)
556 return ipv6_addr_type(&addr->in6) & IPV6_ADDR_UNICAST;
557 #endif
558 return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
559 }
560
561 /*
562 * Pass or drop the packet.
563 * Called by ip_vs_in, when the virtual service is available but
564 * no destination is available for a new connection.
565 */
566 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
567 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
568 {
569 __be16 _ports[2], *pptr, dport;
570 struct netns_ipvs *ipvs = svc->ipvs;
571 struct net *net = ipvs->net;
572
573 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
574 if (!pptr)
575 return NF_DROP;
576 dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
577
578  /* If it is a fwmark-based service, the cache_bypass sysctl is enabled
579     and the destination is a non-local unicast address, then create
580     a cache_bypass connection entry */
581 if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
582 !(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
583 ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
584 int ret;
585 struct ip_vs_conn *cp;
586 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
587 iph->protocol == IPPROTO_UDP) ?
588 IP_VS_CONN_F_ONE_PACKET : 0;
589 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
590
591 /* create a new connection entry */
592 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
593 {
594 struct ip_vs_conn_param p;
595 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
596 &iph->saddr, pptr[0],
597 &iph->daddr, pptr[1], &p);
598 cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
599 IP_VS_CONN_F_BYPASS | flags,
600 NULL, skb->mark);
601 if (!cp)
602 return NF_DROP;
603 }
604
605 /* statistics */
606 ip_vs_in_stats(cp, skb);
607
608 /* set state */
609 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
610
611 /* transmit the first SYN packet */
612 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
613 /* do not touch skb anymore */
614
615 if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
616 atomic_inc(&cp->control->in_pkts);
617 else
618 atomic_inc(&cp->in_pkts);
619 ip_vs_conn_put(cp);
620 return ret;
621 }
622
623 /*
624  * When a virtual FTP service is present, packets destined
625  * for other services on the VIP may get here (except services
626  * listed in the ipvs table); pass such packets through, because
627  * it is not IPVS's job to decide to drop them.
628 */
629 if (svc->port == FTPPORT && dport != FTPPORT)
630 return NF_ACCEPT;
631
632 if (unlikely(ip_vs_iph_icmp(iph)))
633 return NF_DROP;
634
635 /*
636 * Notify the client that the destination is unreachable, and
637 * release the socket buffer.
638  * Since we are at the IP layer, no TCP socket actually
639  * exists and a TCP RST packet cannot be sent; instead,
640  * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
641 */
642 #ifdef CONFIG_IP_VS_IPV6
643 if (svc->af == AF_INET6) {
644 if (!skb->dev)
645 skb->dev = net->loopback_dev;
646 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
647 } else
648 #endif
649 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
650
651 return NF_DROP;
652 }
653
654 #ifdef CONFIG_SYSCTL
655
656 static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
657 {
658 return ipvs->sysctl_snat_reroute;
659 }
660
661 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
662 {
663 return ipvs->sysctl_nat_icmp_send;
664 }
665
666 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
667 {
668 return ipvs->sysctl_expire_nodest_conn;
669 }
670
671 #else
672
673 static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
674 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
675 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
676
677 #endif
678
679 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
680 {
681 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
682 }
683
684 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
685 {
686 if (NF_INET_LOCAL_IN == hooknum)
687 return IP_DEFRAG_VS_IN;
688 if (NF_INET_FORWARD == hooknum)
689 return IP_DEFRAG_VS_FWD;
690 return IP_DEFRAG_VS_OUT;
691 }
692
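/* Reassemble IPv4 fragments and refresh the IP header checksum on success. */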
693 static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
694 struct sk_buff *skb, u_int32_t user)
695 {
696 int err;
697
698 local_bh_disable();
699 err = ip_defrag(ipvs->net, skb, user);
700 local_bh_enable();
701 if (!err)
702 ip_send_check(ip_hdr(skb));
703
704 return err;
705 }
706
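/* Re-route SNATed replies to remote clients when the snat_reroute sysctl
 * is enabled (only for FORWARD and LOCAL_OUT, never LOCAL_IN); returns
 * non-zero if re-routing fails.
 */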
707 static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
708 struct sk_buff *skb, unsigned int hooknum)
709 {
710 if (!sysctl_snat_reroute(ipvs))
711 return 0;
712 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
713 if (NF_INET_LOCAL_IN == hooknum)
714 return 0;
715 #ifdef CONFIG_IP_VS_IPV6
716 if (af == AF_INET6) {
717 struct dst_entry *dst = skb_dst(skb);
718
719 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
720 ip6_route_me_harder(ipvs->net, skb) != 0)
721 return 1;
722 } else
723 #endif
724 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
725 ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
726 return 1;
727
728 return 0;
729 }
730
731 /*
732 * Packet has been made sufficiently writable in caller
733 * - inout: 1=in->out, 0=out->in
734 */
735 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
736 struct ip_vs_conn *cp, int inout)
737 {
738 struct iphdr *iph = ip_hdr(skb);
739 unsigned int icmp_offset = iph->ihl*4;
740 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
741 icmp_offset);
742 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
743
744 if (inout) {
745 iph->saddr = cp->vaddr.ip;
746 ip_send_check(iph);
747 ciph->daddr = cp->vaddr.ip;
748 ip_send_check(ciph);
749 } else {
750 iph->daddr = cp->daddr.ip;
751 ip_send_check(iph);
752 ciph->saddr = cp->daddr.ip;
753 ip_send_check(ciph);
754 }
755
756 /* the TCP/UDP/SCTP port */
757 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
758 IPPROTO_SCTP == ciph->protocol) {
759 __be16 *ports = (void *)ciph + ciph->ihl*4;
760
761 if (inout)
762 ports[1] = cp->vport;
763 else
764 ports[0] = cp->dport;
765 }
766
767 /* And finally the ICMP checksum */
768 icmph->checksum = 0;
769 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
770 skb->ip_summed = CHECKSUM_UNNECESSARY;
771
772 if (inout)
773 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
774 "Forwarding altered outgoing ICMP");
775 else
776 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
777 "Forwarding altered incoming ICMP");
778 }
779
780 #ifdef CONFIG_IP_VS_IPV6
781 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
782 struct ip_vs_conn *cp, int inout)
783 {
784 struct ipv6hdr *iph = ipv6_hdr(skb);
785 unsigned int icmp_offset = 0;
786 unsigned int offs = 0; /* header offset*/
787 int protocol;
788 struct icmp6hdr *icmph;
789 struct ipv6hdr *ciph;
790 unsigned short fragoffs;
791
792 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
793 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
794 offs = icmp_offset + sizeof(struct icmp6hdr);
795 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
796
797 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
798
799 if (inout) {
800 iph->saddr = cp->vaddr.in6;
801 ciph->daddr = cp->vaddr.in6;
802 } else {
803 iph->daddr = cp->daddr.in6;
804 ciph->saddr = cp->daddr.in6;
805 }
806
807 /* the TCP/UDP/SCTP port */
808 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
809 IPPROTO_SCTP == protocol)) {
810 __be16 *ports = (void *)(skb_network_header(skb) + offs);
811
812 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
813 ntohs(inout ? ports[1] : ports[0]),
814 ntohs(inout ? cp->vport : cp->dport));
815 if (inout)
816 ports[1] = cp->vport;
817 else
818 ports[0] = cp->dport;
819 }
820
821 /* And finally the ICMP checksum */
822 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
823 skb->len - icmp_offset,
824 IPPROTO_ICMPV6, 0);
825 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
826 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
827 skb->ip_summed = CHECKSUM_PARTIAL;
828
829 if (inout)
830 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
831 (void *)ciph - (void *)iph,
832 "Forwarding altered outgoing ICMPv6");
833 else
834 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
835 (void *)ciph - (void *)iph,
836 "Forwarding altered incoming ICMPv6");
837 }
838 #endif
839
840 /* Handle relevant response ICMP messages - forward to the right
841 * destination host.
842 */
843 static int handle_response_icmp(int af, struct sk_buff *skb,
844 union nf_inet_addr *snet,
845 __u8 protocol, struct ip_vs_conn *cp,
846 struct ip_vs_protocol *pp,
847 unsigned int offset, unsigned int ihl,
848 unsigned int hooknum)
849 {
850 unsigned int verdict = NF_DROP;
851
852 if (IP_VS_FWD_METHOD(cp) != 0) {
853 pr_err("shouldn't reach here, because the box is on the "
854 "half connection in the tun/dr module.\n");
855 }
856
857 /* Ensure the checksum is correct */
858 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
859 /* Failed checksum! */
860 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
861 IP_VS_DBG_ADDR(af, snet));
862 goto out;
863 }
864
865 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
866 IPPROTO_SCTP == protocol)
867 offset += 2 * sizeof(__u16);
868 if (!skb_make_writable(skb, offset))
869 goto out;
870
871 #ifdef CONFIG_IP_VS_IPV6
872 if (af == AF_INET6)
873 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
874 else
875 #endif
876 ip_vs_nat_icmp(skb, pp, cp, 1);
877
878 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
879 goto out;
880
881 /* do the statistics and put it back */
882 ip_vs_out_stats(cp, skb);
883
884 skb->ipvs_property = 1;
885 if (!(cp->flags & IP_VS_CONN_F_NFCT))
886 ip_vs_notrack(skb);
887 else
888 ip_vs_update_conntrack(skb, cp, 0);
889 verdict = NF_ACCEPT;
890
891 out:
892 __ip_vs_conn_put(cp);
893
894 return verdict;
895 }
896
897 /*
898 * Handle ICMP messages in the inside-to-outside direction (outgoing).
899 * Find any that might be relevant, check against existing connections.
900 * Currently handles error types - unreachable, quench, ttl exceeded.
901 */
902 static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
903 int *related, unsigned int hooknum)
904 {
905 struct iphdr *iph;
906 struct icmphdr _icmph, *ic;
907 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
908 struct ip_vs_iphdr ciph;
909 struct ip_vs_conn *cp;
910 struct ip_vs_protocol *pp;
911 unsigned int offset, ihl;
912 union nf_inet_addr snet;
913
914 *related = 1;
915
916 /* reassemble IP fragments */
917 if (ip_is_fragment(ip_hdr(skb))) {
918 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
919 return NF_STOLEN;
920 }
921
922 iph = ip_hdr(skb);
923 offset = ihl = iph->ihl * 4;
924 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
925 if (ic == NULL)
926 return NF_DROP;
927
928 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
929 ic->type, ntohs(icmp_id(ic)),
930 &iph->saddr, &iph->daddr);
931
932 /*
933 * Work through seeing if this is for us.
934 * These checks are supposed to be in an order that means easy
935 * things are checked first to speed up processing.... however
936 * this means that some packets will manage to get a long way
937 * down this stack and then be rejected, but that's life.
938 */
939 if ((ic->type != ICMP_DEST_UNREACH) &&
940 (ic->type != ICMP_SOURCE_QUENCH) &&
941 (ic->type != ICMP_TIME_EXCEEDED)) {
942 *related = 0;
943 return NF_ACCEPT;
944 }
945
946 /* Now find the contained IP header */
947 offset += sizeof(_icmph);
948 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
949 if (cih == NULL)
950 return NF_ACCEPT; /* The packet looks wrong, ignore */
951
952 pp = ip_vs_proto_get(cih->protocol);
953 if (!pp)
954 return NF_ACCEPT;
955
956 /* Is the embedded protocol header present? */
957 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
958 pp->dont_defrag))
959 return NF_ACCEPT;
960
961 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
962 "Checking outgoing ICMP for");
963
964 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
965
966 /* The embedded headers contain source and dest in reverse order */
967 cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
968 if (!cp)
969 return NF_ACCEPT;
970
971 snet.ip = iph->saddr;
972 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
973 pp, ciph.len, ihl, hooknum);
974 }
975
976 #ifdef CONFIG_IP_VS_IPV6
977 static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
978 int *related, unsigned int hooknum,
979 struct ip_vs_iphdr *ipvsh)
980 {
981 struct icmp6hdr _icmph, *ic;
982 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
983 struct ip_vs_conn *cp;
984 struct ip_vs_protocol *pp;
985 union nf_inet_addr snet;
986 unsigned int offset;
987
988 *related = 1;
989 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
990 if (ic == NULL)
991 return NF_DROP;
992
993 /*
994 * Work through seeing if this is for us.
995 * These checks are supposed to be in an order that means easy
996 * things are checked first to speed up processing.... however
997 * this means that some packets will manage to get a long way
998 * down this stack and then be rejected, but that's life.
999 */
1000 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1001 *related = 0;
1002 return NF_ACCEPT;
1003 }
1004  /* A fragment header before the ICMP header tells us that
1005  * this is not an error message, since error messages cannot be fragmented.
1006 */
1007 if (ipvsh->flags & IP6_FH_F_FRAG)
1008 return NF_DROP;
1009
1010 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1011 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1012 &ipvsh->saddr, &ipvsh->daddr);
1013
1014 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
1015 true, &ciph))
1016 return NF_ACCEPT; /* The packet looks wrong, ignore */
1017
1018 pp = ip_vs_proto_get(ciph.protocol);
1019 if (!pp)
1020 return NF_ACCEPT;
1021
1022 /* The embedded headers contain source and dest in reverse order */
1023 cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
1024 if (!cp)
1025 return NF_ACCEPT;
1026
1027 snet.in6 = ciph.saddr.in6;
1028 offset = ciph.len;
1029 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
1030 pp, offset, sizeof(struct ipv6hdr),
1031 hooknum);
1032 }
1033 #endif
1034
1035 /*
1036  *   Check if the SCTP chunk is an ABORT chunk
1037 */
1038 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
1039 {
1040 sctp_chunkhdr_t *sch, schunk;
1041 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
1042 sizeof(schunk), &schunk);
1043 if (sch == NULL)
1044 return 0;
1045 if (sch->type == SCTP_CID_ABORT)
1046 return 1;
1047 return 0;
1048 }
1049
1050 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1051 {
1052 struct tcphdr _tcph, *th;
1053
1054 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1055 if (th == NULL)
1056 return 0;
1057 return th->rst;
1058 }
1059
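/* A packet starts a new connection if it is a TCP SYN or an SCTP INIT
 * chunk; for other packets and protocols this returns false.
 */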
1060 static inline bool is_new_conn(const struct sk_buff *skb,
1061 struct ip_vs_iphdr *iph)
1062 {
1063 switch (iph->protocol) {
1064 case IPPROTO_TCP: {
1065 struct tcphdr _tcph, *th;
1066
1067 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1068 if (th == NULL)
1069 return false;
1070 return th->syn;
1071 }
1072 case IPPROTO_SCTP: {
1073 sctp_chunkhdr_t *sch, schunk;
1074
1075 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1076 sizeof(schunk), &schunk);
1077 if (sch == NULL)
1078 return false;
1079 return sch->type == SCTP_CID_INIT;
1080 }
1081 default:
1082 return false;
1083 }
1084 }
1085
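/* Decide whether an existing connection may be bypassed so that a new
 * SYN/INIT can be rescheduled: never for controlled connections;
 * otherwise only when the old connection is effectively closed
 * (TCP TIME_WAIT/CLOSE, SCTP CLOSED, or TCP FIN_WAIT without output
 * when (conn_reuse_mode & 2) is set).
 */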
1086 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
1087 int conn_reuse_mode)
1088 {
1089 /* Controlled (FTP DATA or persistence)? */
1090 if (cp->control)
1091 return false;
1092
1093 switch (cp->protocol) {
1094 case IPPROTO_TCP:
1095 return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
1096 (cp->state == IP_VS_TCP_S_CLOSE) ||
1097 ((conn_reuse_mode & 2) &&
1098 (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
1099 (cp->flags & IP_VS_CONN_F_NOOUTPUT));
1100 case IPPROTO_SCTP:
1101 return cp->state == IP_VS_SCTP_S_CLOSED;
1102 default:
1103 return false;
1104 }
1105 }
1106
1107  /* Generic function to create new connections for outgoing RS packets
1108  *
1109  * Pre-requisites for successful connection creation:
1110  * 1) Virtual Service is NOT fwmark based:
1111  *    in a fwmark VS the actual vaddr and vport are unknown to IPVS
1112  * 2) Real Server and Virtual Service were NOT configured without port:
1113  *    this allows matching different VSs to the same RS IP address
1114  */
1115 struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
1116 struct ip_vs_dest *dest,
1117 struct sk_buff *skb,
1118 const struct ip_vs_iphdr *iph,
1119 __be16 dport,
1120 __be16 cport)
1121 {
1122 struct ip_vs_conn_param param;
1123 struct ip_vs_conn *ct = NULL, *cp = NULL;
1124 const union nf_inet_addr *vaddr, *daddr, *caddr;
1125 union nf_inet_addr snet;
1126 __be16 vport;
1127 unsigned int flags;
1128
1129 EnterFunction(12);
1130 vaddr = &svc->addr;
1131 vport = svc->port;
1132 daddr = &iph->saddr;
1133 caddr = &iph->daddr;
1134
1135 /* check pre-requisites are satisfied */
1136 if (svc->fwmark)
1137 return NULL;
1138 if (!vport || !dport)
1139 return NULL;
1140
1141 /* for persistent service first create connection template */
1142 if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
1143 /* apply netmask the same way ingress-side does */
1144 #ifdef CONFIG_IP_VS_IPV6
1145 if (svc->af == AF_INET6)
1146 ipv6_addr_prefix(&snet.in6, &caddr->in6,
1147 (__force __u32)svc->netmask);
1148 else
1149 #endif
1150 snet.ip = caddr->ip & svc->netmask;
1151 /* fill params and create template if not existent */
1152 if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol,
1153 &snet, 0, vaddr,
1154 vport, &param) < 0)
1155 return NULL;
1156 ct = ip_vs_ct_in_get(&param);
1157 if (!ct) {
1158 ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
1159 IP_VS_CONN_F_TEMPLATE, dest, 0);
1160 if (!ct) {
1161 kfree(param.pe_data);
1162 return NULL;
1163 }
1164 ct->timeout = svc->timeout;
1165 } else {
1166 kfree(param.pe_data);
1167 }
1168 }
1169
1170 /* connection flags */
1171 flags = ((svc->flags & IP_VS_SVC_F_ONEPACKET) &&
1172 iph->protocol == IPPROTO_UDP) ? IP_VS_CONN_F_ONE_PACKET : 0;
1173 /* create connection */
1174 ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
1175 caddr, cport, vaddr, vport, &param);
1176 cp = ip_vs_conn_new(&param, dest->af, daddr, dport, flags, dest, 0);
1177 if (!cp) {
1178 if (ct)
1179 ip_vs_conn_put(ct);
1180 return NULL;
1181 }
1182 if (ct) {
1183 ip_vs_control_add(cp, ct);
1184 ip_vs_conn_put(ct);
1185 }
1186 ip_vs_conn_stats(cp, svc);
1187
1188 /* return connection (will be used to handle outgoing packet) */
1189 IP_VS_DBG_BUF(6, "New connection RS-initiated:%c c:%s:%u v:%s:%u "
1190 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
1191 ip_vs_fwd_tag(cp),
1192 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
1193 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
1194 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
1195 cp->flags, atomic_read(&cp->refcnt));
1196 LeaveFunction(12);
1197 return cp;
1198 }
1199
1200  /* Handle outgoing packets which are considered requests initiated by
1201  * real servers, so that subsequent responses from the external client can
1202  * be routed to the right real server.
1203  * Also used for outgoing responses in OPS mode.
1204  *
1205  * Connection management is handled by a persistence-engine specific callback.
1206  */
1207 static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
1208 struct netns_ipvs *ipvs,
1209 int af, struct sk_buff *skb,
1210 const struct ip_vs_iphdr *iph)
1211 {
1212 struct ip_vs_dest *dest;
1213 struct ip_vs_conn *cp = NULL;
1214 __be16 _ports[2], *pptr;
1215
1216 if (hooknum == NF_INET_LOCAL_IN)
1217 return NULL;
1218
1219 pptr = frag_safe_skb_hp(skb, iph->len,
1220 sizeof(_ports), _ports, iph);
1221 if (!pptr)
1222 return NULL;
1223
1224 rcu_read_lock();
1225 dest = ip_vs_find_real_service(ipvs, af, iph->protocol,
1226 &iph->saddr, pptr[0]);
1227 if (dest) {
1228 struct ip_vs_service *svc;
1229 struct ip_vs_pe *pe;
1230
1231 svc = rcu_dereference(dest->svc);
1232 if (svc) {
1233 pe = rcu_dereference(svc->pe);
1234 if (pe && pe->conn_out)
1235 cp = pe->conn_out(svc, dest, skb, iph,
1236 pptr[0], pptr[1]);
1237 }
1238 }
1239 rcu_read_unlock();
1240
1241 return cp;
1242 }
1243
1244 /* Handle response packets: rewrite addresses and send away...
1245 */
1246 static unsigned int
1247 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1248 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1249 unsigned int hooknum)
1250 {
1251 struct ip_vs_protocol *pp = pd->pp;
1252
1253 IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
1254
1255 if (!skb_make_writable(skb, iph->len))
1256 goto drop;
1257
1258 /* mangle the packet */
1259 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1260 goto drop;
1261
1262 #ifdef CONFIG_IP_VS_IPV6
1263 if (af == AF_INET6)
1264 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1265 else
1266 #endif
1267 {
1268 ip_hdr(skb)->saddr = cp->vaddr.ip;
1269 ip_send_check(ip_hdr(skb));
1270 }
1271
1272 /*
1273 * nf_iterate does not expect change in the skb->dst->dev.
1274 * It looks like it is not fatal to enable this code for hooks
1275 * where our handlers are at the end of the chain list and
1276 * when all next handlers use skb->dst->dev and not outdev.
1277 * It will definitely route properly the inout NAT traffic
1278 * when multiple paths are used.
1279 */
1280
1281 /* For policy routing, packets originating from this
1282 * machine itself may be routed differently to packets
1283 * passing through. We want this packet to be routed as
1284 * if it came from this machine itself. So re-compute
1285 * the routing information.
1286 */
1287 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
1288 goto drop;
1289
1290 IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
1291
1292 ip_vs_out_stats(cp, skb);
1293 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1294 skb->ipvs_property = 1;
1295 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1296 ip_vs_notrack(skb);
1297 else
1298 ip_vs_update_conntrack(skb, cp, 0);
1299 ip_vs_conn_put(cp);
1300
1301 LeaveFunction(11);
1302 return NF_ACCEPT;
1303
1304 drop:
1305 ip_vs_conn_put(cp);
1306 kfree_skb(skb);
1307 LeaveFunction(11);
1308 return NF_STOLEN;
1309 }
1310
1311 /*
1312 * Check if outgoing packet belongs to the established ip_vs_conn.
1313 */
1314 static unsigned int
1315 ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1316 {
1317 struct ip_vs_iphdr iph;
1318 struct ip_vs_protocol *pp;
1319 struct ip_vs_proto_data *pd;
1320 struct ip_vs_conn *cp;
1321 struct sock *sk;
1322
1323 EnterFunction(11);
1324
1325 /* Already marked as IPVS request or reply? */
1326 if (skb->ipvs_property)
1327 return NF_ACCEPT;
1328
1329 sk = skb_to_full_sk(skb);
1330 /* Bad... Do not break raw sockets */
1331 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1332 af == AF_INET)) {
1333
1334 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1335 return NF_ACCEPT;
1336 }
1337
1338 if (unlikely(!skb_dst(skb)))
1339 return NF_ACCEPT;
1340
1341 if (!ipvs->enable)
1342 return NF_ACCEPT;
1343
1344 ip_vs_fill_iph_skb(af, skb, false, &iph);
1345 #ifdef CONFIG_IP_VS_IPV6
1346 if (af == AF_INET6) {
1347 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1348 int related;
1349 int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
1350 hooknum, &iph);
1351
1352 if (related)
1353 return verdict;
1354 }
1355 } else
1356 #endif
1357 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1358 int related;
1359 int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
1360
1361 if (related)
1362 return verdict;
1363 }
1364
1365 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1366 if (unlikely(!pd))
1367 return NF_ACCEPT;
1368 pp = pd->pp;
1369
1370 /* reassemble IP fragments */
1371 #ifdef CONFIG_IP_VS_IPV6
1372 if (af == AF_INET)
1373 #endif
1374 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1375 if (ip_vs_gather_frags(ipvs, skb,
1376 ip_vs_defrag_user(hooknum)))
1377 return NF_STOLEN;
1378
1379 ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
1380 }
1381
1382 /*
1383 * Check if the packet belongs to an existing entry
1384 */
1385 cp = pp->conn_out_get(ipvs, af, skb, &iph);
1386
1387 if (likely(cp))
1388 return handle_response(af, skb, pd, cp, &iph, hooknum);
1389
1390 /* Check for real-server-started requests */
1391 if (atomic_read(&ipvs->conn_out_counter)) {
1392 /* Currently only for UDP:
1393 * connection oriented protocols typically use
1394 * ephemeral ports for outgoing connections, so
1395 * related incoming responses would not match any VS
1396 */
1397 if (pp->protocol == IPPROTO_UDP) {
1398 cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph);
1399 if (likely(cp))
1400 return handle_response(af, skb, pd, cp, &iph,
1401 hooknum);
1402 }
1403 }
1404
1405 if (sysctl_nat_icmp_send(ipvs) &&
1406 (pp->protocol == IPPROTO_TCP ||
1407 pp->protocol == IPPROTO_UDP ||
1408 pp->protocol == IPPROTO_SCTP)) {
1409 __be16 _ports[2], *pptr;
1410
1411 pptr = frag_safe_skb_hp(skb, iph.len,
1412 sizeof(_ports), _ports, &iph);
1413 if (pptr == NULL)
1414 return NF_ACCEPT; /* Not for me */
1415 if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
1416 pptr[0])) {
1417 /*
1418  * Notify the real server that there is no
1419  * existing entry, unless the packet is a
1420  * TCP RST or an SCTP ABORT.
1421 */
1422 if ((iph.protocol != IPPROTO_TCP &&
1423 iph.protocol != IPPROTO_SCTP)
1424 || ((iph.protocol == IPPROTO_TCP
1425 && !is_tcp_reset(skb, iph.len))
1426 || (iph.protocol == IPPROTO_SCTP
1427 && !is_sctp_abort(skb,
1428 iph.len)))) {
1429 #ifdef CONFIG_IP_VS_IPV6
1430 if (af == AF_INET6) {
1431 if (!skb->dev)
1432 skb->dev = ipvs->net->loopback_dev;
1433 icmpv6_send(skb,
1434 ICMPV6_DEST_UNREACH,
1435 ICMPV6_PORT_UNREACH,
1436 0);
1437 } else
1438 #endif
1439 icmp_send(skb,
1440 ICMP_DEST_UNREACH,
1441 ICMP_PORT_UNREACH, 0);
1442 return NF_DROP;
1443 }
1444 }
1445 }
1446 IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
1447 "ip_vs_out: packet continues traversal as normal");
1448 return NF_ACCEPT;
1449 }
1450
1451 /*
1452 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1453 * used only for VS/NAT.
1454 * Check if packet is reply for established ip_vs_conn.
1455 */
1456 static unsigned int
1457 ip_vs_reply4(void *priv, struct sk_buff *skb,
1458 const struct nf_hook_state *state)
1459 {
1460 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1461 }
1462
1463 /*
1464 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1465 * Check if packet is reply for established ip_vs_conn.
1466 */
1467 static unsigned int
1468 ip_vs_local_reply4(void *priv, struct sk_buff *skb,
1469 const struct nf_hook_state *state)
1470 {
1471 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1472 }
1473
1474 #ifdef CONFIG_IP_VS_IPV6
1475
1476 /*
1477 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1478 * used only for VS/NAT.
1479 * Check if packet is reply for established ip_vs_conn.
1480 */
1481 static unsigned int
1482 ip_vs_reply6(void *priv, struct sk_buff *skb,
1483 const struct nf_hook_state *state)
1484 {
1485 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1486 }
1487
1488 /*
1489 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1490 * Check if packet is reply for established ip_vs_conn.
1491 */
1492 static unsigned int
1493 ip_vs_local_reply6(void *priv, struct sk_buff *skb,
1494 const struct nf_hook_state *state)
1495 {
1496 return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1497 }
1498
1499 #endif
1500
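/* Try to schedule a new connection for a packet with no matching entry.
 * Returns 1 with *cpp set on success; returns 0 with *verdict filled in
 * (typically NF_ACCEPT for a miss) when the caller should just return
 * the verdict.
 */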
1501 static unsigned int
1502 ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
1503 struct ip_vs_proto_data *pd,
1504 int *verdict, struct ip_vs_conn **cpp,
1505 struct ip_vs_iphdr *iph)
1506 {
1507 struct ip_vs_protocol *pp = pd->pp;
1508
1509 if (!iph->fragoffs) {
1510  /* No (second) fragments need to enter here, as the fragment zero
1511  * replayed by nf_defrag_ipv6 will already have created the cp
1512 */
1513
1514 /* Schedule and create new connection entry into cpp */
1515 if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
1516 return 0;
1517 }
1518
1519 if (unlikely(!*cpp)) {
1520 /* sorry, all this trouble for a no-hit :) */
1521 IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
1522 "ip_vs_in: packet continues traversal as normal");
1523 if (iph->fragoffs) {
1524  /* A fragment that couldn't be mapped to a conn entry
1525  * means the nf_defrag_ipv6 module is missing
1526 */
1527 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1528 IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
1529 "unhandled fragment");
1530 }
1531 *verdict = NF_ACCEPT;
1532 return 0;
1533 }
1534
1535 return 1;
1536 }
1537
1538 /*
1539 * Handle ICMP messages in the outside-to-inside direction (incoming).
1540 * Find any that might be relevant, check against existing connections,
1541 * forward to the right destination host if relevant.
1542 * Currently handles error types - unreachable, quench, ttl exceeded.
1543 */
1544 static int
1545 ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
1546 unsigned int hooknum)
1547 {
1548 struct iphdr *iph;
1549 struct icmphdr _icmph, *ic;
1550 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1551 struct ip_vs_iphdr ciph;
1552 struct ip_vs_conn *cp;
1553 struct ip_vs_protocol *pp;
1554 struct ip_vs_proto_data *pd;
1555 unsigned int offset, offset2, ihl, verdict;
1556 bool ipip, new_cp = false;
1557
1558 *related = 1;
1559
1560 /* reassemble IP fragments */
1561 if (ip_is_fragment(ip_hdr(skb))) {
1562 if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
1563 return NF_STOLEN;
1564 }
1565
1566 iph = ip_hdr(skb);
1567 offset = ihl = iph->ihl * 4;
1568 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1569 if (ic == NULL)
1570 return NF_DROP;
1571
1572 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1573 ic->type, ntohs(icmp_id(ic)),
1574 &iph->saddr, &iph->daddr);
1575
1576 /*
1577 * Work through seeing if this is for us.
1578 * These checks are supposed to be in an order that means easy
1579 * things are checked first to speed up processing.... however
1580 * this means that some packets will manage to get a long way
1581 * down this stack and then be rejected, but that's life.
1582 */
1583 if ((ic->type != ICMP_DEST_UNREACH) &&
1584 (ic->type != ICMP_SOURCE_QUENCH) &&
1585 (ic->type != ICMP_TIME_EXCEEDED)) {
1586 *related = 0;
1587 return NF_ACCEPT;
1588 }
1589
1590 /* Now find the contained IP header */
1591 offset += sizeof(_icmph);
1592 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1593 if (cih == NULL)
1594 return NF_ACCEPT; /* The packet looks wrong, ignore */
1595
1596 /* Special case for errors for IPIP packets */
1597 ipip = false;
1598 if (cih->protocol == IPPROTO_IPIP) {
1599 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1600 return NF_ACCEPT;
1601 /* Error for our IPIP must arrive at LOCAL_IN */
1602 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1603 return NF_ACCEPT;
1604 offset += cih->ihl * 4;
1605 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1606 if (cih == NULL)
1607 return NF_ACCEPT; /* The packet looks wrong, ignore */
1608 ipip = true;
1609 }
1610
1611 pd = ip_vs_proto_data_get(ipvs, cih->protocol);
1612 if (!pd)
1613 return NF_ACCEPT;
1614 pp = pd->pp;
1615
1616 /* Is the embedded protocol header present? */
1617 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1618 pp->dont_defrag))
1619 return NF_ACCEPT;
1620
1621 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1622 "Checking incoming ICMP for");
1623
1624 offset2 = offset;
1625 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
1626 offset = ciph.len;
1627
1628 /* The embedded headers contain source and dest in reverse order.
1629  * For IPIP this is an error for a request, not for a reply.
1630 */
1631 cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
1632
1633 if (!cp) {
1634 int v;
1635
1636 if (!sysctl_schedule_icmp(ipvs))
1637 return NF_ACCEPT;
1638
1639 if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
1640 return v;
1641 new_cp = true;
1642 }
1643
1644 verdict = NF_DROP;
1645
1646 /* Ensure the checksum is correct */
1647 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1648 /* Failed checksum! */
1649 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1650 &iph->saddr);
1651 goto out;
1652 }
1653
1654 if (ipip) {
1655 __be32 info = ic->un.gateway;
1656 __u8 type = ic->type;
1657 __u8 code = ic->code;
1658
1659 /* Update the MTU */
1660 if (ic->type == ICMP_DEST_UNREACH &&
1661 ic->code == ICMP_FRAG_NEEDED) {
1662 struct ip_vs_dest *dest = cp->dest;
1663 u32 mtu = ntohs(ic->un.frag.mtu);
1664 __be16 frag_off = cih->frag_off;
1665
1666 /* Strip outer IP and ICMP, go to IPIP header */
1667 if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1668 goto ignore_ipip;
1669 offset2 -= ihl + sizeof(_icmph);
1670 skb_reset_network_header(skb);
1671 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1672 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1673 ipv4_update_pmtu(skb, ipvs->net,
1674 mtu, 0, 0, 0, 0);
1675 /* Client uses PMTUD? */
1676 if (!(frag_off & htons(IP_DF)))
1677 goto ignore_ipip;
1678 /* Prefer the resulting PMTU */
1679 if (dest) {
1680 struct ip_vs_dest_dst *dest_dst;
1681
1682 rcu_read_lock();
1683 dest_dst = rcu_dereference(dest->dest_dst);
1684 if (dest_dst)
1685 mtu = dst_mtu(dest_dst->dst_cache);
1686 rcu_read_unlock();
1687 }
1688 if (mtu > 68 + sizeof(struct iphdr))
1689 mtu -= sizeof(struct iphdr);
1690 info = htonl(mtu);
1691 }
1692 /* Strip outer IP, ICMP and IPIP, go to IP header of
1693 * original request.
1694 */
1695 if (pskb_pull(skb, offset2) == NULL)
1696 goto ignore_ipip;
1697 skb_reset_network_header(skb);
1698 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1699 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1700 type, code, ntohl(info));
1701 icmp_send(skb, type, code, info);
1702  /* ICMP can be shorter but anyway, account it */
1703 ip_vs_out_stats(cp, skb);
1704
1705 ignore_ipip:
1706 consume_skb(skb);
1707 verdict = NF_STOLEN;
1708 goto out;
1709 }
1710
1711 /* do the statistics and put it back */
1712 ip_vs_in_stats(cp, skb);
1713 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1714 IPPROTO_SCTP == cih->protocol)
1715 offset += 2 * sizeof(__u16);
1716 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1717
1718 out:
1719 if (likely(!new_cp))
1720 __ip_vs_conn_put(cp);
1721 else
1722 ip_vs_conn_put(cp);
1723
1724 return verdict;
1725 }
1726
1727 #ifdef CONFIG_IP_VS_IPV6
1728 static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
1729 int *related, unsigned int hooknum,
1730 struct ip_vs_iphdr *iph)
1731 {
1732 struct icmp6hdr _icmph, *ic;
1733 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1734 struct ip_vs_conn *cp;
1735 struct ip_vs_protocol *pp;
1736 struct ip_vs_proto_data *pd;
1737 unsigned int offset, verdict;
1738 bool new_cp = false;
1739
1740 *related = 1;
1741
1742 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1743 if (ic == NULL)
1744 return NF_DROP;
1745
1746 /*
1747 * Work through seeing if this is for us.
1748 * These checks are supposed to be in an order that means easy
1749 * things are checked first to speed up processing.... however
1750 * this means that some packets will manage to get a long way
1751 * down this stack and then be rejected, but that's life.
1752 */
1753 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1754 *related = 0;
1755 return NF_ACCEPT;
1756 }
1757  /* A fragment header before the ICMP header tells us that
1758  * this is not an error message, since error messages cannot be fragmented.
1759 */
1760 if (iph->flags & IP6_FH_F_FRAG)
1761 return NF_DROP;
1762
1763 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1764 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1765 &iph->saddr, &iph->daddr);
1766
1767 offset = iph->len + sizeof(_icmph);
1768 if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
1769 return NF_ACCEPT;
1770
1771 pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
1772 if (!pd)
1773 return NF_ACCEPT;
1774 pp = pd->pp;
1775
1776 /* Cannot handle fragmented embedded protocol */
1777 if (ciph.fragoffs)
1778 return NF_ACCEPT;
1779
1780 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
1781 "Checking incoming ICMPv6 for");
1782
1783 /* The embedded headers contain source and dest in reverse order
1784 * if not from localhost
1785 */
1786 cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
1787
1788 if (!cp) {
1789 int v;
1790
1791 if (!sysctl_schedule_icmp(ipvs))
1792 return NF_ACCEPT;
1793
1794 if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
1795 return v;
1796
1797 new_cp = true;
1798 }
1799
1800 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1801 if ((hooknum == NF_INET_LOCAL_OUT) &&
1802 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1803 verdict = NF_ACCEPT;
1804 goto out;
1805 }
1806
1807 /* do the statistics and put it back */
1808 ip_vs_in_stats(cp, skb);
1809
1810 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1811 offset = ciph.len;
1812 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1813 IPPROTO_SCTP == ciph.protocol)
1814 offset += 2 * sizeof(__u16); /* Also mangle ports */
1815
1816 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
1817
1818 out:
1819 if (likely(!new_cp))
1820 __ip_vs_conn_put(cp);
1821 else
1822 ip_vs_conn_put(cp);
1823
1824 return verdict;
1825 }
1826 #endif
1827
1828
1829 /*
1830 * Check if it's for virtual services, look it up,
1831 * and send it on its way...
1832 */
1833 static unsigned int
1834 ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1835 {
1836 struct ip_vs_iphdr iph;
1837 struct ip_vs_protocol *pp;
1838 struct ip_vs_proto_data *pd;
1839 struct ip_vs_conn *cp;
1840 int ret, pkts;
1841 int conn_reuse_mode;
1842 struct sock *sk;
1843
1844 /* Already marked as IPVS request or reply? */
1845 if (skb->ipvs_property)
1846 return NF_ACCEPT;
1847
1848 /*
1849 * Big tappo:
1850 * - remote client: only PACKET_HOST
1851 * - route: used for struct net when skb->dev is unset
1852 */
1853 if (unlikely((skb->pkt_type != PACKET_HOST &&
1854 hooknum != NF_INET_LOCAL_OUT) ||
1855 !skb_dst(skb))) {
1856 ip_vs_fill_iph_skb(af, skb, false, &iph);
1857 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1858 " ignored in hook %u\n",
1859 skb->pkt_type, iph.protocol,
1860 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1861 return NF_ACCEPT;
1862 }
1863 /* ipvs enabled in this netns ? */
1864 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1865 return NF_ACCEPT;
1866
1867 ip_vs_fill_iph_skb(af, skb, false, &iph);
1868
1869 /* Bad... Do not break raw sockets */
1870 sk = skb_to_full_sk(skb);
1871 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1872 af == AF_INET)) {
1873
1874 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1875 return NF_ACCEPT;
1876 }
1877
1878 #ifdef CONFIG_IP_VS_IPV6
1879 if (af == AF_INET6) {
1880 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1881 int related;
1882 int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
1883 hooknum, &iph);
1884
1885 if (related)
1886 return verdict;
1887 }
1888 } else
1889 #endif
1890 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1891 int related;
1892 int verdict = ip_vs_in_icmp(ipvs, skb, &related,
1893 hooknum);
1894
1895 if (related)
1896 return verdict;
1897 }
1898
1899 /* Protocol supported? */
1900 pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1901 if (unlikely(!pd)) {
1902 /* The only way we'll see this packet again is if it's
1903 * encapsulated, so mark it with ipvs_property=1 so we
1904 * skip it if we're ignoring tunneled packets
1905 */
1906 if (sysctl_ignore_tunneled(ipvs))
1907 skb->ipvs_property = 1;
1908
1909 return NF_ACCEPT;
1910 }
1911 pp = pd->pp;
1912 /*
1913 * Check if the packet belongs to an existing connection entry
1914 */
1915 cp = pp->conn_in_get(ipvs, af, skb, &iph);
1916
1917 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
1918 if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
1919 bool uses_ct = false, resched = false;
1920
1921 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
1922 unlikely(!atomic_read(&cp->dest->weight))) {
1923 resched = true;
1924 uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
1925 } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
1926 uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
1927 if (!atomic_read(&cp->n_control)) {
1928 resched = true;
1929 } else {
1930 /* Do not reschedule controlling connection
1931 * that uses conntrack while it is still
1932 * referenced by controlled connection(s).
1933 */
1934 resched = !uses_ct;
1935 }
1936 }
1937
1938 if (resched) {
1939 if (!atomic_read(&cp->n_control))
1940 ip_vs_conn_expire_now(cp);
1941 __ip_vs_conn_put(cp);
1942 if (uses_ct)
1943 return NF_DROP;
1944 cp = NULL;
1945 }
1946 }
1947
1948 if (unlikely(!cp)) {
1949 int v;
1950
1951 if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
1952 return v;
1953 }
1954
1955 IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
1956
1957 /* Check the server status */
1958 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1959 /* the destination server is not available */
1960
1961 if (sysctl_expire_nodest_conn(ipvs)) {
1962 /* try to expire the connection immediately */
1963 ip_vs_conn_expire_now(cp);
1964 }
1965 /* don't restart its timer, and silently
1966 drop the packet. */
1967 __ip_vs_conn_put(cp);
1968 return NF_DROP;
1969 }
1970
1971 ip_vs_in_stats(cp, skb);
1972 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1973 	if (cp->packet_xmit) {
1974 		ret = cp->packet_xmit(skb, cp, pp, &iph);
1975 		/* do not touch skb anymore */
1976 	} else {
1977 IP_VS_DBG_RL("warning: packet_xmit is null");
1978 ret = NF_ACCEPT;
1979 }
1980
1981 	/* Increase its packet counter and check if it needs
1982 	 * to be synchronized
1983 	 *
1984 	 * Sync the connection if it is about to close, to
1985 	 * encourage the standby servers to update the connection timeout
1986 *
1987 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1988 */
1989
1990 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1991 pkts = sysctl_sync_threshold(ipvs);
1992 else
1993 pkts = atomic_add_return(1, &cp->in_pkts);
1994
1995 if (ipvs->sync_state & IP_VS_STATE_MASTER)
1996 ip_vs_sync_conn(ipvs, cp, pkts);
1997 else if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
1998 /* increment is done inside ip_vs_sync_conn too */
1999 atomic_inc(&cp->control->in_pkts);
2000
2001 ip_vs_conn_put(cp);
2002 return ret;
2003 }
2004
2005 /*
2006 * AF_INET handler in NF_INET_LOCAL_IN chain
2007 * Schedule and forward packets from remote clients
2008 */
2009 static unsigned int
2010 ip_vs_remote_request4(void *priv, struct sk_buff *skb,
2011 const struct nf_hook_state *state)
2012 {
2013 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
2014 }
2015
2016 /*
2017 * AF_INET handler in NF_INET_LOCAL_OUT chain
2018 * Schedule and forward packets from local clients
2019 */
2020 static unsigned int
2021 ip_vs_local_request4(void *priv, struct sk_buff *skb,
2022 const struct nf_hook_state *state)
2023 {
2024 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
2025 }
2026
2027 #ifdef CONFIG_IP_VS_IPV6
2028
2029 /*
2030 * AF_INET6 handler in NF_INET_LOCAL_IN chain
2031 * Schedule and forward packets from remote clients
2032 */
2033 static unsigned int
2034 ip_vs_remote_request6(void *priv, struct sk_buff *skb,
2035 const struct nf_hook_state *state)
2036 {
2037 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
2038 }
2039
2040 /*
2041 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
2042 * Schedule and forward packets from local clients
2043 */
2044 static unsigned int
2045 ip_vs_local_request6(void *priv, struct sk_buff *skb,
2046 const struct nf_hook_state *state)
2047 {
2048 return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
2049 }
2050
2051 #endif
2052
2053
2054 /*
2055 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
2056 * related packets destined for 0.0.0.0/0.
2057  * When a fwmark-based virtual service is used, such as a transparent
2058  * cache cluster, TCP packets can be marked and routed to ip_vs_in,
2059  * but ICMP destined for 0.0.0.0/0 cannot be easily marked and
2060  * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
2061 * and send them to ip_vs_in_icmp.
2062 */
2063 static unsigned int
2064 ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
2065 const struct nf_hook_state *state)
2066 {
2067 int r;
2068 struct netns_ipvs *ipvs = net_ipvs(state->net);
2069
2070 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
2071 return NF_ACCEPT;
2072
2073 /* ipvs enabled in this netns ? */
2074 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
2075 return NF_ACCEPT;
2076
2077 return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
2078 }
2079
2080 #ifdef CONFIG_IP_VS_IPV6
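/*
 *	IPv6 counterpart of ip_vs_forward_icmp(): catch ICMPv6 packets
 *	related to IPVS connections at the NF_INET_FORWARD chain and pass
 *	them to ip_vs_in_icmp_v6().
 */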
2081 static unsigned int
2082 ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
2083 const struct nf_hook_state *state)
2084 {
2085 int r;
2086 struct netns_ipvs *ipvs = net_ipvs(state->net);
2087 struct ip_vs_iphdr iphdr;
2088
2089 ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
2090 if (iphdr.protocol != IPPROTO_ICMPV6)
2091 return NF_ACCEPT;
2092
2093 /* ipvs enabled in this netns ? */
2094 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
2095 return NF_ACCEPT;
2096
2097 return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
2098 }
2099 #endif
2100
2101
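/*
 *	Netfilter hook registrations.  At LOCAL_IN and LOCAL_OUT the reply
 *	handler (source mangling for VS/NAT) is registered at a slightly
 *	higher priority (lower number) than the request handler, so replies
 *	are translated before new requests are scheduled.
 */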
2102 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
2103 /* After packet filtering, change source only for VS/NAT */
2104 {
2105 .hook = ip_vs_reply4,
2106 .pf = NFPROTO_IPV4,
2107 .hooknum = NF_INET_LOCAL_IN,
2108 .priority = NF_IP_PRI_NAT_SRC - 2,
2109 },
2110 /* After packet filtering, forward packet through VS/DR, VS/TUN,
2111 * or VS/NAT(change destination), so that filtering rules can be
2112 * applied to IPVS. */
2113 {
2114 .hook = ip_vs_remote_request4,
2115 .pf = NFPROTO_IPV4,
2116 .hooknum = NF_INET_LOCAL_IN,
2117 .priority = NF_IP_PRI_NAT_SRC - 1,
2118 },
2119 /* Before ip_vs_in, change source only for VS/NAT */
2120 {
2121 .hook = ip_vs_local_reply4,
2122 .pf = NFPROTO_IPV4,
2123 .hooknum = NF_INET_LOCAL_OUT,
2124 .priority = NF_IP_PRI_NAT_DST + 1,
2125 },
2126 /* After mangle, schedule and forward local requests */
2127 {
2128 .hook = ip_vs_local_request4,
2129 .pf = NFPROTO_IPV4,
2130 .hooknum = NF_INET_LOCAL_OUT,
2131 .priority = NF_IP_PRI_NAT_DST + 2,
2132 },
2133 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
2134 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2135 {
2136 .hook = ip_vs_forward_icmp,
2137 .pf = NFPROTO_IPV4,
2138 .hooknum = NF_INET_FORWARD,
2139 .priority = 99,
2140 },
2141 /* After packet filtering, change source only for VS/NAT */
2142 {
2143 .hook = ip_vs_reply4,
2144 .pf = NFPROTO_IPV4,
2145 .hooknum = NF_INET_FORWARD,
2146 .priority = 100,
2147 },
2148 #ifdef CONFIG_IP_VS_IPV6
2149 /* After packet filtering, change source only for VS/NAT */
2150 {
2151 .hook = ip_vs_reply6,
2152 .pf = NFPROTO_IPV6,
2153 .hooknum = NF_INET_LOCAL_IN,
2154 .priority = NF_IP6_PRI_NAT_SRC - 2,
2155 },
2156 /* After packet filtering, forward packet through VS/DR, VS/TUN,
2157 * or VS/NAT(change destination), so that filtering rules can be
2158 * applied to IPVS. */
2159 {
2160 .hook = ip_vs_remote_request6,
2161 .pf = NFPROTO_IPV6,
2162 .hooknum = NF_INET_LOCAL_IN,
2163 .priority = NF_IP6_PRI_NAT_SRC - 1,
2164 },
2165 /* Before ip_vs_in, change source only for VS/NAT */
2166 {
2167 .hook = ip_vs_local_reply6,
2168 .pf = NFPROTO_IPV6,
2169 .hooknum = NF_INET_LOCAL_OUT,
2170 .priority = NF_IP6_PRI_NAT_DST + 1,
2171 },
2172 /* After mangle, schedule and forward local requests */
2173 {
2174 .hook = ip_vs_local_request6,
2175 .pf = NFPROTO_IPV6,
2176 .hooknum = NF_INET_LOCAL_OUT,
2177 .priority = NF_IP6_PRI_NAT_DST + 2,
2178 },
2179 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
2180 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2181 {
2182 .hook = ip_vs_forward_icmp_v6,
2183 .pf = NFPROTO_IPV6,
2184 .hooknum = NF_INET_FORWARD,
2185 .priority = 99,
2186 },
2187 /* After packet filtering, change source only for VS/NAT */
2188 {
2189 .hook = ip_vs_reply6,
2190 .pf = NFPROTO_IPV6,
2191 .hooknum = NF_INET_FORWARD,
2192 .priority = 100,
2193 },
2194 #endif
2195 };
2196 /*
2197  *	Initialize IP Virtual Server per-netns memory.
2198 */
2199 static int __net_init __ip_vs_init(struct net *net)
2200 {
2201 struct netns_ipvs *ipvs;
2202
2203 ipvs = net_generic(net, ip_vs_net_id);
2204 if (ipvs == NULL)
2205 return -ENOMEM;
2206
2207 	/* Hold the beast until a service is registered */
2208 ipvs->enable = 0;
2209 ipvs->net = net;
2210 	/* Counter used for creating unique names */
2211 ipvs->gen = atomic_read(&ipvs_netns_cnt);
2212 atomic_inc(&ipvs_netns_cnt);
2213 net->ipvs = ipvs;
2214
2215 if (ip_vs_estimator_net_init(ipvs) < 0)
2216 goto estimator_fail;
2217
2218 if (ip_vs_control_net_init(ipvs) < 0)
2219 goto control_fail;
2220
2221 if (ip_vs_protocol_net_init(ipvs) < 0)
2222 goto protocol_fail;
2223
2224 if (ip_vs_app_net_init(ipvs) < 0)
2225 goto app_fail;
2226
2227 if (ip_vs_conn_net_init(ipvs) < 0)
2228 goto conn_fail;
2229
2230 if (ip_vs_sync_net_init(ipvs) < 0)
2231 goto sync_fail;
2232
2233 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
2234 sizeof(struct netns_ipvs), ipvs->gen);
2235 return 0;
2236 /*
2237 * Error handling
2238 */
2239
2240 sync_fail:
2241 ip_vs_conn_net_cleanup(ipvs);
2242 conn_fail:
2243 ip_vs_app_net_cleanup(ipvs);
2244 app_fail:
2245 ip_vs_protocol_net_cleanup(ipvs);
2246 protocol_fail:
2247 ip_vs_control_net_cleanup(ipvs);
2248 control_fail:
2249 ip_vs_estimator_net_cleanup(ipvs);
2250 estimator_fail:
2251 net->ipvs = NULL;
2252 return -ENOMEM;
2253 }
2254
2255 static void __net_exit __ip_vs_cleanup(struct net *net)
2256 {
2257 struct netns_ipvs *ipvs = net_ipvs(net);
2258
2259 ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */
2260 ip_vs_conn_net_cleanup(ipvs);
2261 ip_vs_app_net_cleanup(ipvs);
2262 ip_vs_protocol_net_cleanup(ipvs);
2263 ip_vs_control_net_cleanup(ipvs);
2264 ip_vs_estimator_net_cleanup(ipvs);
2265 IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
2266 net->ipvs = NULL;
2267 }
2268
2269 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2270 {
2271 struct netns_ipvs *ipvs = net_ipvs(net);
2272 EnterFunction(2);
2273 ipvs->enable = 0; /* Disable packet reception */
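	/* Make the disabled flag visible before the sync threads are stopped
	 * below; the packet processing paths check ipvs->enable first.
	 */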
2274 smp_wmb();
2275 ip_vs_sync_net_cleanup(ipvs);
2276 LeaveFunction(2);
2277 }
2278
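/*
 *	ipvs_core_ops allocates and frees the per-netns state (net_generic()
 *	storage of size netns_ipvs), while ipvs_core_dev_ops only provides an
 *	exit handler.  Pernet device exit handlers run before pernet subsys
 *	exit handlers, so packet reception and the sync threads are stopped
 *	in __ip_vs_dev_cleanup() before __ip_vs_cleanup() releases the
 *	remaining per-netns objects.
 */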
2279 static struct pernet_operations ipvs_core_ops = {
2280 .init = __ip_vs_init,
2281 .exit = __ip_vs_cleanup,
2282 .id = &ip_vs_net_id,
2283 .size = sizeof(struct netns_ipvs),
2284 };
2285
2286 static struct pernet_operations ipvs_core_dev_ops = {
2287 .exit = __ip_vs_dev_cleanup,
2288 };
2289
2290 /*
2291 * Initialize IP Virtual Server
2292 */
2293 static int __init ip_vs_init(void)
2294 {
2295 int ret;
2296
2297 ret = ip_vs_control_init();
2298 if (ret < 0) {
2299 pr_err("can't setup control.\n");
2300 goto exit;
2301 }
2302
2303 ip_vs_protocol_init();
2304
2305 ret = ip_vs_conn_init();
2306 if (ret < 0) {
2307 pr_err("can't setup connection table.\n");
2308 goto cleanup_protocol;
2309 }
2310
2311 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2312 if (ret < 0)
2313 goto cleanup_conn;
2314
2315 ret = register_pernet_device(&ipvs_core_dev_ops);
2316 if (ret < 0)
2317 goto cleanup_sub;
2318
2319 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2320 if (ret < 0) {
2321 pr_err("can't register hooks.\n");
2322 goto cleanup_dev;
2323 }
2324
2325 ret = ip_vs_register_nl_ioctl();
2326 if (ret < 0) {
2327 pr_err("can't register netlink/ioctl.\n");
2328 goto cleanup_hooks;
2329 }
2330
2331 pr_info("ipvs loaded.\n");
2332
2333 return ret;
2334
2335 cleanup_hooks:
2336 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2337 cleanup_dev:
2338 unregister_pernet_device(&ipvs_core_dev_ops);
2339 cleanup_sub:
2340 unregister_pernet_subsys(&ipvs_core_ops);
2341 cleanup_conn:
2342 ip_vs_conn_cleanup();
2343 cleanup_protocol:
2344 ip_vs_protocol_cleanup();
2345 ip_vs_control_cleanup();
2346 exit:
2347 return ret;
2348 }
2349
2350 static void __exit ip_vs_cleanup(void)
2351 {
2352 ip_vs_unregister_nl_ioctl();
2353 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2354 unregister_pernet_device(&ipvs_core_dev_ops);
2355 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2356 ip_vs_conn_cleanup();
2357 ip_vs_protocol_cleanup();
2358 ip_vs_control_cleanup();
2359 pr_info("ipvs unloaded.\n");
2360 }
2361
2362 module_init(ip_vs_init);
2363 module_exit(ip_vs_cleanup);
2364 MODULE_LICENSE("GPL");