/*
 * IPVS         An implementation of the IP virtual server support for the
 *              LINUX operating system.  IPVS is now implemented as a module
 *              over the Netfilter framework. IPVS can be used to build a
 *              high-performance and highly available server based on a
 *              cluster of servers.
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Peter Kese <peter.kese@ijs.si>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
 * and others.
 *
 * Changes:
 *	Paul `Rusty' Russell		properly handle non-linear skbs
 *	Harald Welte			don't use nfcache
 */
27 #include <linux/module.h>
28 #include <linux/kernel.h>
30 #include <linux/tcp.h>
31 #include <linux/icmp.h>
36 #include <net/icmp.h> /* for icmp_send */
37 #include <net/route.h>
39 #include <linux/netfilter.h>
40 #include <linux/netfilter_ipv4.h>
42 #include <net/ip_vs.h>
/* Symbols exported to the scheduler and application-helper modules. */
EXPORT_SYMBOL(register_ip_vs_scheduler);
EXPORT_SYMBOL(unregister_ip_vs_scheduler);
EXPORT_SYMBOL(ip_vs_skb_replace);
EXPORT_SYMBOL(ip_vs_proto_name);
EXPORT_SYMBOL(ip_vs_conn_new);
EXPORT_SYMBOL(ip_vs_conn_in_get);
EXPORT_SYMBOL(ip_vs_conn_out_get);
#ifdef CONFIG_IP_VS_PROTO_TCP
EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
#endif
EXPORT_SYMBOL(ip_vs_conn_put);
#ifdef CONFIG_IP_VS_DEBUG
EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif


/* ID used in ICMP lookups */
#define icmp_id(icmph)		(((icmph)->un).echo.id)
/*
 *	Map an IP protocol number to a printable name.
 *	Unknown protocols are formatted as "IP_<num>" into a static
 *	buffer, so the fallback result is not re-entrant — intended for
 *	debug/log output only.
 */
const char *ip_vs_proto_name(unsigned proto)
{
	static char buf[20];

	switch (proto) {
	case IPPROTO_IP:
		return "IP";
	case IPPROTO_UDP:
		return "UDP";
	case IPPROTO_TCP:
		return "TCP";
	case IPPROTO_ICMP:
		return "ICMP";
	default:
		sprintf(buf, "IP_%d", proto);
		return buf;
	}
}
83 void ip_vs_init_hash_table(struct list_head
*table
, int rows
)
86 INIT_LIST_HEAD(&table
[rows
]);
90 ip_vs_in_stats(struct ip_vs_conn
*cp
, struct sk_buff
*skb
)
92 struct ip_vs_dest
*dest
= cp
->dest
;
93 if (dest
&& (dest
->flags
& IP_VS_DEST_F_AVAILABLE
)) {
94 spin_lock(&dest
->stats
.lock
);
96 dest
->stats
.inbytes
+= skb
->len
;
97 spin_unlock(&dest
->stats
.lock
);
99 spin_lock(&dest
->svc
->stats
.lock
);
100 dest
->svc
->stats
.inpkts
++;
101 dest
->svc
->stats
.inbytes
+= skb
->len
;
102 spin_unlock(&dest
->svc
->stats
.lock
);
104 spin_lock(&ip_vs_stats
.lock
);
105 ip_vs_stats
.inpkts
++;
106 ip_vs_stats
.inbytes
+= skb
->len
;
107 spin_unlock(&ip_vs_stats
.lock
);
113 ip_vs_out_stats(struct ip_vs_conn
*cp
, struct sk_buff
*skb
)
115 struct ip_vs_dest
*dest
= cp
->dest
;
116 if (dest
&& (dest
->flags
& IP_VS_DEST_F_AVAILABLE
)) {
117 spin_lock(&dest
->stats
.lock
);
118 dest
->stats
.outpkts
++;
119 dest
->stats
.outbytes
+= skb
->len
;
120 spin_unlock(&dest
->stats
.lock
);
122 spin_lock(&dest
->svc
->stats
.lock
);
123 dest
->svc
->stats
.outpkts
++;
124 dest
->svc
->stats
.outbytes
+= skb
->len
;
125 spin_unlock(&dest
->svc
->stats
.lock
);
127 spin_lock(&ip_vs_stats
.lock
);
128 ip_vs_stats
.outpkts
++;
129 ip_vs_stats
.outbytes
+= skb
->len
;
130 spin_unlock(&ip_vs_stats
.lock
);
136 ip_vs_conn_stats(struct ip_vs_conn
*cp
, struct ip_vs_service
*svc
)
138 spin_lock(&cp
->dest
->stats
.lock
);
139 cp
->dest
->stats
.conns
++;
140 spin_unlock(&cp
->dest
->stats
.lock
);
142 spin_lock(&svc
->stats
.lock
);
144 spin_unlock(&svc
->stats
.lock
);
146 spin_lock(&ip_vs_stats
.lock
);
148 spin_unlock(&ip_vs_stats
.lock
);
153 ip_vs_set_state(struct ip_vs_conn
*cp
, int direction
,
154 const struct sk_buff
*skb
,
155 struct ip_vs_protocol
*pp
)
157 if (unlikely(!pp
->state_transition
))
159 return pp
->state_transition(cp
, direction
, skb
, pp
);
164 * IPVS persistent scheduling function
165 * It creates a connection entry according to its template if exists,
166 * or selects a server and creates a connection entry plus a template.
167 * Locking: we are svc user (svc->refcnt), so we hold all dests too
168 * Protocols supported: TCP, UDP
170 static struct ip_vs_conn
*
171 ip_vs_sched_persist(struct ip_vs_service
*svc
,
172 const struct sk_buff
*skb
,
175 struct ip_vs_conn
*cp
= NULL
;
176 struct iphdr
*iph
= ip_hdr(skb
);
177 struct ip_vs_dest
*dest
;
178 struct ip_vs_conn
*ct
;
179 __be16 dport
; /* destination port to forward */
180 __be32 snet
; /* source network of the client, after masking */
182 /* Mask saddr with the netmask to adjust template granularity */
183 snet
= iph
->saddr
& svc
->netmask
;
185 IP_VS_DBG(6, "p-schedule: src %u.%u.%u.%u:%u dest %u.%u.%u.%u:%u "
186 "mnet %u.%u.%u.%u\n",
187 NIPQUAD(iph
->saddr
), ntohs(ports
[0]),
188 NIPQUAD(iph
->daddr
), ntohs(ports
[1]),
192 * As far as we know, FTP is a very complicated network protocol, and
193 * it uses control connection and data connections. For active FTP,
194 * FTP server initialize data connection to the client, its source port
195 * is often 20. For passive FTP, FTP server tells the clients the port
196 * that it passively listens to, and the client issues the data
197 * connection. In the tunneling or direct routing mode, the load
198 * balancer is on the client-to-server half of connection, the port
199 * number is unknown to the load balancer. So, a conn template like
200 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
201 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
202 * is created for other persistent services.
204 if (ports
[1] == svc
->port
) {
205 /* Check if a template already exists */
206 if (svc
->port
!= FTPPORT
)
207 ct
= ip_vs_ct_in_get(iph
->protocol
, snet
, 0,
208 iph
->daddr
, ports
[1]);
210 ct
= ip_vs_ct_in_get(iph
->protocol
, snet
, 0,
213 if (!ct
|| !ip_vs_check_template(ct
)) {
215 * No template found or the dest of the connection
216 * template is not available.
218 dest
= svc
->scheduler
->schedule(svc
, skb
);
220 IP_VS_DBG(1, "p-schedule: no dest found.\n");
225 * Create a template like <protocol,caddr,0,
226 * vaddr,vport,daddr,dport> for non-ftp service,
227 * and <protocol,caddr,0,vaddr,0,daddr,0>
230 if (svc
->port
!= FTPPORT
)
231 ct
= ip_vs_conn_new(iph
->protocol
,
235 dest
->addr
, dest
->port
,
236 IP_VS_CONN_F_TEMPLATE
,
239 ct
= ip_vs_conn_new(iph
->protocol
,
243 IP_VS_CONN_F_TEMPLATE
,
248 ct
->timeout
= svc
->timeout
;
250 /* set destination with the found template */
256 * Note: persistent fwmark-based services and persistent
257 * port zero service are handled here.
258 * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
259 * port zero template: <protocol,caddr,0,vaddr,0,daddr,0>
262 ct
= ip_vs_ct_in_get(IPPROTO_IP
, snet
, 0,
263 htonl(svc
->fwmark
), 0);
265 ct
= ip_vs_ct_in_get(iph
->protocol
, snet
, 0,
268 if (!ct
|| !ip_vs_check_template(ct
)) {
270 * If it is not persistent port zero, return NULL,
271 * otherwise create a connection template.
276 dest
= svc
->scheduler
->schedule(svc
, skb
);
278 IP_VS_DBG(1, "p-schedule: no dest found.\n");
283 * Create a template according to the service
286 ct
= ip_vs_conn_new(IPPROTO_IP
,
288 htonl(svc
->fwmark
), 0,
290 IP_VS_CONN_F_TEMPLATE
,
293 ct
= ip_vs_conn_new(iph
->protocol
,
297 IP_VS_CONN_F_TEMPLATE
,
302 ct
->timeout
= svc
->timeout
;
304 /* set destination with the found template */
311 * Create a new connection according to the template
313 cp
= ip_vs_conn_new(iph
->protocol
,
314 iph
->saddr
, ports
[0],
315 iph
->daddr
, ports
[1],
327 ip_vs_control_add(cp
, ct
);
330 ip_vs_conn_stats(cp
, svc
);
336 * IPVS main scheduling function
337 * It selects a server according to the virtual service, and
338 * creates a connection entry.
339 * Protocols supported: TCP, UDP
342 ip_vs_schedule(struct ip_vs_service
*svc
, const struct sk_buff
*skb
)
344 struct ip_vs_conn
*cp
= NULL
;
345 struct iphdr
*iph
= ip_hdr(skb
);
346 struct ip_vs_dest
*dest
;
347 __be16 _ports
[2], *pptr
;
349 pptr
= skb_header_pointer(skb
, iph
->ihl
*4,
350 sizeof(_ports
), _ports
);
357 if (svc
->flags
& IP_VS_SVC_F_PERSISTENT
)
358 return ip_vs_sched_persist(svc
, skb
, pptr
);
361 * Non-persistent service
363 if (!svc
->fwmark
&& pptr
[1] != svc
->port
) {
365 IP_VS_ERR("Schedule: port zero only supported "
366 "in persistent services, "
367 "check your ipvs configuration\n");
371 dest
= svc
->scheduler
->schedule(svc
, skb
);
373 IP_VS_DBG(1, "Schedule: no dest found.\n");
378 * Create a connection entry.
380 cp
= ip_vs_conn_new(iph
->protocol
,
383 dest
->addr
, dest
->port
?dest
->port
:pptr
[1],
389 IP_VS_DBG(6, "Schedule fwd:%c c:%u.%u.%u.%u:%u v:%u.%u.%u.%u:%u "
390 "d:%u.%u.%u.%u:%u conn->flags:%X conn->refcnt:%d\n",
392 NIPQUAD(cp
->caddr
), ntohs(cp
->cport
),
393 NIPQUAD(cp
->vaddr
), ntohs(cp
->vport
),
394 NIPQUAD(cp
->daddr
), ntohs(cp
->dport
),
395 cp
->flags
, atomic_read(&cp
->refcnt
));
397 ip_vs_conn_stats(cp
, svc
);
403 * Pass or drop the packet.
404 * Called by ip_vs_in, when the virtual service is available but
405 * no destination is available for a new connection.
407 int ip_vs_leave(struct ip_vs_service
*svc
, struct sk_buff
*skb
,
408 struct ip_vs_protocol
*pp
)
410 __be16 _ports
[2], *pptr
;
411 struct iphdr
*iph
= ip_hdr(skb
);
413 pptr
= skb_header_pointer(skb
, iph
->ihl
*4,
414 sizeof(_ports
), _ports
);
416 ip_vs_service_put(svc
);
420 /* if it is fwmark-based service, the cache_bypass sysctl is up
421 and the destination is RTN_UNICAST (and not local), then create
422 a cache_bypass connection entry */
423 if (sysctl_ip_vs_cache_bypass
&& svc
->fwmark
424 && (inet_addr_type(&init_net
, iph
->daddr
) == RTN_UNICAST
)) {
426 struct ip_vs_conn
*cp
;
428 ip_vs_service_put(svc
);
430 /* create a new connection entry */
431 IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n");
432 cp
= ip_vs_conn_new(iph
->protocol
,
442 ip_vs_in_stats(cp
, skb
);
445 cs
= ip_vs_set_state(cp
, IP_VS_DIR_INPUT
, skb
, pp
);
447 /* transmit the first SYN packet */
448 ret
= cp
->packet_xmit(skb
, cp
, pp
);
449 /* do not touch skb anymore */
451 atomic_inc(&cp
->in_pkts
);
457 * When the virtual ftp service is presented, packets destined
458 * for other services on the VIP may get here (except services
459 * listed in the ipvs table), pass the packets, because it is
460 * not ipvs job to decide to drop the packets.
462 if ((svc
->port
== FTPPORT
) && (pptr
[1] != FTPPORT
)) {
463 ip_vs_service_put(svc
);
467 ip_vs_service_put(svc
);
470 * Notify the client that the destination is unreachable, and
471 * release the socket buffer.
472 * Since it is in IP layer, the TCP socket is not actually
473 * created, the TCP RST packet cannot be sent, instead that
474 * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ
476 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_PORT_UNREACH
, 0);
482 * It is hooked before NF_IP_PRI_NAT_SRC at the NF_INET_POST_ROUTING
483 * chain, and is used for VS/NAT.
484 * It detects packets for VS/NAT connections and sends the packets
485 * immediately. This can avoid that iptable_nat mangles the packets
488 static unsigned int ip_vs_post_routing(unsigned int hooknum
,
490 const struct net_device
*in
,
491 const struct net_device
*out
,
492 int (*okfn
)(struct sk_buff
*))
494 if (!skb
->ipvs_property
)
496 /* The packet was sent from IPVS, exit this chain */
500 __sum16
ip_vs_checksum_complete(struct sk_buff
*skb
, int offset
)
502 return csum_fold(skb_checksum(skb
, offset
, skb
->len
- offset
, 0));
505 static inline int ip_vs_gather_frags(struct sk_buff
*skb
, u_int32_t user
)
507 int err
= ip_defrag(skb
, user
);
510 ip_send_check(ip_hdr(skb
));
516 * Packet has been made sufficiently writable in caller
517 * - inout: 1=in->out, 0=out->in
519 void ip_vs_nat_icmp(struct sk_buff
*skb
, struct ip_vs_protocol
*pp
,
520 struct ip_vs_conn
*cp
, int inout
)
522 struct iphdr
*iph
= ip_hdr(skb
);
523 unsigned int icmp_offset
= iph
->ihl
*4;
524 struct icmphdr
*icmph
= (struct icmphdr
*)(skb_network_header(skb
) +
526 struct iphdr
*ciph
= (struct iphdr
*)(icmph
+ 1);
529 iph
->saddr
= cp
->vaddr
;
531 ciph
->daddr
= cp
->vaddr
;
534 iph
->daddr
= cp
->daddr
;
536 ciph
->saddr
= cp
->daddr
;
540 /* the TCP/UDP port */
541 if (IPPROTO_TCP
== ciph
->protocol
|| IPPROTO_UDP
== ciph
->protocol
) {
542 __be16
*ports
= (void *)ciph
+ ciph
->ihl
*4;
545 ports
[1] = cp
->vport
;
547 ports
[0] = cp
->dport
;
550 /* And finally the ICMP checksum */
552 icmph
->checksum
= ip_vs_checksum_complete(skb
, icmp_offset
);
553 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
556 IP_VS_DBG_PKT(11, pp
, skb
, (void *)ciph
- (void *)iph
,
557 "Forwarding altered outgoing ICMP");
559 IP_VS_DBG_PKT(11, pp
, skb
, (void *)ciph
- (void *)iph
,
560 "Forwarding altered incoming ICMP");
564 * Handle ICMP messages in the inside-to-outside direction (outgoing).
565 * Find any that might be relevant, check against existing connections,
566 * forward to the right destination host if relevant.
567 * Currently handles error types - unreachable, quench, ttl exceeded.
568 * (Only used in VS/NAT)
570 static int ip_vs_out_icmp(struct sk_buff
*skb
, int *related
)
573 struct icmphdr _icmph
, *ic
;
574 struct iphdr _ciph
, *cih
; /* The ip header contained within the ICMP */
575 struct ip_vs_conn
*cp
;
576 struct ip_vs_protocol
*pp
;
577 unsigned int offset
, ihl
, verdict
;
581 /* reassemble IP fragments */
582 if (ip_hdr(skb
)->frag_off
& htons(IP_MF
| IP_OFFSET
)) {
583 if (ip_vs_gather_frags(skb
, IP_DEFRAG_VS_OUT
))
588 offset
= ihl
= iph
->ihl
* 4;
589 ic
= skb_header_pointer(skb
, offset
, sizeof(_icmph
), &_icmph
);
593 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %u.%u.%u.%u->%u.%u.%u.%u\n",
594 ic
->type
, ntohs(icmp_id(ic
)),
595 NIPQUAD(iph
->saddr
), NIPQUAD(iph
->daddr
));
598 * Work through seeing if this is for us.
599 * These checks are supposed to be in an order that means easy
600 * things are checked first to speed up processing.... however
601 * this means that some packets will manage to get a long way
602 * down this stack and then be rejected, but that's life.
604 if ((ic
->type
!= ICMP_DEST_UNREACH
) &&
605 (ic
->type
!= ICMP_SOURCE_QUENCH
) &&
606 (ic
->type
!= ICMP_TIME_EXCEEDED
)) {
611 /* Now find the contained IP header */
612 offset
+= sizeof(_icmph
);
613 cih
= skb_header_pointer(skb
, offset
, sizeof(_ciph
), &_ciph
);
615 return NF_ACCEPT
; /* The packet looks wrong, ignore */
617 pp
= ip_vs_proto_get(cih
->protocol
);
621 /* Is the embedded protocol header present? */
622 if (unlikely(cih
->frag_off
& htons(IP_OFFSET
) &&
626 IP_VS_DBG_PKT(11, pp
, skb
, offset
, "Checking outgoing ICMP for");
628 offset
+= cih
->ihl
* 4;
630 /* The embedded headers contain source and dest in reverse order */
631 cp
= pp
->conn_out_get(skb
, pp
, cih
, offset
, 1);
637 if (IP_VS_FWD_METHOD(cp
) != 0) {
638 IP_VS_ERR("shouldn't reach here, because the box is on the "
639 "half connection in the tun/dr module.\n");
642 /* Ensure the checksum is correct */
643 if (!skb_csum_unnecessary(skb
) && ip_vs_checksum_complete(skb
, ihl
)) {
644 /* Failed checksum! */
645 IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n",
646 NIPQUAD(iph
->saddr
));
650 if (IPPROTO_TCP
== cih
->protocol
|| IPPROTO_UDP
== cih
->protocol
)
651 offset
+= 2 * sizeof(__u16
);
652 if (!skb_make_writable(skb
, offset
))
655 ip_vs_nat_icmp(skb
, pp
, cp
, 1);
657 /* do the statistics and put it back */
658 ip_vs_out_stats(cp
, skb
);
660 skb
->ipvs_property
= 1;
664 __ip_vs_conn_put(cp
);
669 static inline int is_tcp_reset(const struct sk_buff
*skb
)
671 struct tcphdr _tcph
, *th
;
673 th
= skb_header_pointer(skb
, ip_hdrlen(skb
), sizeof(_tcph
), &_tcph
);
680 * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT.
681 * Check if outgoing packet belongs to the established ip_vs_conn,
682 * rewrite addresses of the packet and send it on its way...
685 ip_vs_out(unsigned int hooknum
, struct sk_buff
*skb
,
686 const struct net_device
*in
, const struct net_device
*out
,
687 int (*okfn
)(struct sk_buff
*))
690 struct ip_vs_protocol
*pp
;
691 struct ip_vs_conn
*cp
;
696 if (skb
->ipvs_property
)
700 if (unlikely(iph
->protocol
== IPPROTO_ICMP
)) {
701 int related
, verdict
= ip_vs_out_icmp(skb
, &related
);
708 pp
= ip_vs_proto_get(iph
->protocol
);
712 /* reassemble IP fragments */
713 if (unlikely(iph
->frag_off
& htons(IP_MF
|IP_OFFSET
) &&
715 if (ip_vs_gather_frags(skb
, IP_DEFRAG_VS_OUT
))
723 * Check if the packet belongs to an existing entry
725 cp
= pp
->conn_out_get(skb
, pp
, iph
, ihl
, 0);
728 if (sysctl_ip_vs_nat_icmp_send
&&
729 (pp
->protocol
== IPPROTO_TCP
||
730 pp
->protocol
== IPPROTO_UDP
)) {
731 __be16 _ports
[2], *pptr
;
733 pptr
= skb_header_pointer(skb
, ihl
,
734 sizeof(_ports
), _ports
);
736 return NF_ACCEPT
; /* Not for me */
737 if (ip_vs_lookup_real_service(iph
->protocol
,
738 iph
->saddr
, pptr
[0])) {
740 * Notify the real server: there is no
741 * existing entry if it is not RST
742 * packet or not TCP packet.
744 if (iph
->protocol
!= IPPROTO_TCP
745 || !is_tcp_reset(skb
)) {
746 icmp_send(skb
,ICMP_DEST_UNREACH
,
747 ICMP_PORT_UNREACH
, 0);
752 IP_VS_DBG_PKT(12, pp
, skb
, 0,
753 "packet continues traversal as normal");
757 IP_VS_DBG_PKT(11, pp
, skb
, 0, "Outgoing packet");
759 if (!skb_make_writable(skb
, ihl
))
762 /* mangle the packet */
763 if (pp
->snat_handler
&& !pp
->snat_handler(skb
, pp
, cp
))
765 ip_hdr(skb
)->saddr
= cp
->vaddr
;
766 ip_send_check(ip_hdr(skb
));
768 /* For policy routing, packets originating from this
769 * machine itself may be routed differently to packets
770 * passing through. We want this packet to be routed as
771 * if it came from this machine itself. So re-compute
772 * the routing information.
774 if (ip_route_me_harder(skb
, RTN_LOCAL
) != 0)
777 IP_VS_DBG_PKT(10, pp
, skb
, 0, "After SNAT");
779 ip_vs_out_stats(cp
, skb
);
780 ip_vs_set_state(cp
, IP_VS_DIR_OUTPUT
, skb
, pp
);
783 skb
->ipvs_property
= 1;
796 * Handle ICMP messages in the outside-to-inside direction (incoming).
797 * Find any that might be relevant, check against existing connections,
798 * forward to the right destination host if relevant.
799 * Currently handles error types - unreachable, quench, ttl exceeded.
802 ip_vs_in_icmp(struct sk_buff
*skb
, int *related
, unsigned int hooknum
)
805 struct icmphdr _icmph
, *ic
;
806 struct iphdr _ciph
, *cih
; /* The ip header contained within the ICMP */
807 struct ip_vs_conn
*cp
;
808 struct ip_vs_protocol
*pp
;
809 unsigned int offset
, ihl
, verdict
;
813 /* reassemble IP fragments */
814 if (ip_hdr(skb
)->frag_off
& htons(IP_MF
| IP_OFFSET
)) {
815 if (ip_vs_gather_frags(skb
, hooknum
== NF_INET_LOCAL_IN
?
816 IP_DEFRAG_VS_IN
: IP_DEFRAG_VS_FWD
))
821 offset
= ihl
= iph
->ihl
* 4;
822 ic
= skb_header_pointer(skb
, offset
, sizeof(_icmph
), &_icmph
);
826 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %u.%u.%u.%u->%u.%u.%u.%u\n",
827 ic
->type
, ntohs(icmp_id(ic
)),
828 NIPQUAD(iph
->saddr
), NIPQUAD(iph
->daddr
));
831 * Work through seeing if this is for us.
832 * These checks are supposed to be in an order that means easy
833 * things are checked first to speed up processing.... however
834 * this means that some packets will manage to get a long way
835 * down this stack and then be rejected, but that's life.
837 if ((ic
->type
!= ICMP_DEST_UNREACH
) &&
838 (ic
->type
!= ICMP_SOURCE_QUENCH
) &&
839 (ic
->type
!= ICMP_TIME_EXCEEDED
)) {
844 /* Now find the contained IP header */
845 offset
+= sizeof(_icmph
);
846 cih
= skb_header_pointer(skb
, offset
, sizeof(_ciph
), &_ciph
);
848 return NF_ACCEPT
; /* The packet looks wrong, ignore */
850 pp
= ip_vs_proto_get(cih
->protocol
);
854 /* Is the embedded protocol header present? */
855 if (unlikely(cih
->frag_off
& htons(IP_OFFSET
) &&
859 IP_VS_DBG_PKT(11, pp
, skb
, offset
, "Checking incoming ICMP for");
861 offset
+= cih
->ihl
* 4;
863 /* The embedded headers contain source and dest in reverse order */
864 cp
= pp
->conn_in_get(skb
, pp
, cih
, offset
, 1);
870 /* Ensure the checksum is correct */
871 if (!skb_csum_unnecessary(skb
) && ip_vs_checksum_complete(skb
, ihl
)) {
872 /* Failed checksum! */
873 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %d.%d.%d.%d!\n",
874 NIPQUAD(iph
->saddr
));
878 /* do the statistics and put it back */
879 ip_vs_in_stats(cp
, skb
);
880 if (IPPROTO_TCP
== cih
->protocol
|| IPPROTO_UDP
== cih
->protocol
)
881 offset
+= 2 * sizeof(__u16
);
882 verdict
= ip_vs_icmp_xmit(skb
, cp
, pp
, offset
);
883 /* do not touch skb anymore */
886 __ip_vs_conn_put(cp
);
892 * Check if it's for virtual services, look it up,
893 * and send it on its way...
896 ip_vs_in(unsigned int hooknum
, struct sk_buff
*skb
,
897 const struct net_device
*in
, const struct net_device
*out
,
898 int (*okfn
)(struct sk_buff
*))
901 struct ip_vs_protocol
*pp
;
902 struct ip_vs_conn
*cp
;
907 * Big tappo: only PACKET_HOST (neither loopback nor mcasts)
908 * ... don't know why 1st test DOES NOT include 2nd (?)
910 if (unlikely(skb
->pkt_type
!= PACKET_HOST
911 || skb
->dev
->flags
& IFF_LOOPBACK
|| skb
->sk
)) {
912 IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n",
914 ip_hdr(skb
)->protocol
,
915 NIPQUAD(ip_hdr(skb
)->daddr
));
920 if (unlikely(iph
->protocol
== IPPROTO_ICMP
)) {
921 int related
, verdict
= ip_vs_in_icmp(skb
, &related
, hooknum
);
928 /* Protocol supported? */
929 pp
= ip_vs_proto_get(iph
->protocol
);
936 * Check if the packet belongs to an existing connection entry
938 cp
= pp
->conn_in_get(skb
, pp
, iph
, ihl
, 0);
943 if (!pp
->conn_schedule(skb
, pp
, &v
, &cp
))
948 /* sorry, all this trouble for a no-hit :) */
949 IP_VS_DBG_PKT(12, pp
, skb
, 0,
950 "packet continues traversal as normal");
954 IP_VS_DBG_PKT(11, pp
, skb
, 0, "Incoming packet");
956 /* Check the server status */
957 if (cp
->dest
&& !(cp
->dest
->flags
& IP_VS_DEST_F_AVAILABLE
)) {
958 /* the destination server is not available */
960 if (sysctl_ip_vs_expire_nodest_conn
) {
961 /* try to expire the connection immediately */
962 ip_vs_conn_expire_now(cp
);
964 /* don't restart its timer, and silently
966 __ip_vs_conn_put(cp
);
970 ip_vs_in_stats(cp
, skb
);
971 restart
= ip_vs_set_state(cp
, IP_VS_DIR_INPUT
, skb
, pp
);
973 ret
= cp
->packet_xmit(skb
, cp
, pp
);
974 /* do not touch skb anymore */
976 IP_VS_DBG_RL("warning: packet_xmit is null");
980 /* Increase its packet counter and check if it is needed
983 * Sync connection if it is about to close to
984 * encorage the standby servers to update the connections timeout
986 atomic_inc(&cp
->in_pkts
);
987 if ((ip_vs_sync_state
& IP_VS_STATE_MASTER
) &&
988 (((cp
->protocol
!= IPPROTO_TCP
||
989 cp
->state
== IP_VS_TCP_S_ESTABLISHED
) &&
990 (atomic_read(&cp
->in_pkts
) % sysctl_ip_vs_sync_threshold
[1]
991 == sysctl_ip_vs_sync_threshold
[0])) ||
992 ((cp
->protocol
== IPPROTO_TCP
) && (cp
->old_state
!= cp
->state
) &&
993 ((cp
->state
== IP_VS_TCP_S_FIN_WAIT
) ||
994 (cp
->state
== IP_VS_TCP_S_CLOSE
)))))
996 cp
->old_state
= cp
->state
;
1004 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1005 * related packets destined for 0.0.0.0/0.
1006 * When fwmark-based virtual service is used, such as transparent
1007 * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1008 * but ICMP destined for 0.0.0.0/0 cannot not be easily marked and
1009 * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1010 * and send them to ip_vs_in_icmp.
1013 ip_vs_forward_icmp(unsigned int hooknum
, struct sk_buff
*skb
,
1014 const struct net_device
*in
, const struct net_device
*out
,
1015 int (*okfn
)(struct sk_buff
*))
1019 if (ip_hdr(skb
)->protocol
!= IPPROTO_ICMP
)
1022 return ip_vs_in_icmp(skb
, &r
, hooknum
);
1026 static struct nf_hook_ops ip_vs_ops
[] __read_mostly
= {
1027 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1028 * or VS/NAT(change destination), so that filtering rules can be
1029 * applied to IPVS. */
1032 .owner
= THIS_MODULE
,
1034 .hooknum
= NF_INET_LOCAL_IN
,
1037 /* After packet filtering, change source only for VS/NAT */
1040 .owner
= THIS_MODULE
,
1042 .hooknum
= NF_INET_FORWARD
,
1045 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1046 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1048 .hook
= ip_vs_forward_icmp
,
1049 .owner
= THIS_MODULE
,
1051 .hooknum
= NF_INET_FORWARD
,
1054 /* Before the netfilter connection tracking, exit from POST_ROUTING */
1056 .hook
= ip_vs_post_routing
,
1057 .owner
= THIS_MODULE
,
1059 .hooknum
= NF_INET_POST_ROUTING
,
1060 .priority
= NF_IP_PRI_NAT_SRC
-1,
1066 * Initialize IP Virtual Server
1068 static int __init
ip_vs_init(void)
1072 ret
= ip_vs_control_init();
1074 IP_VS_ERR("can't setup control.\n");
1075 goto cleanup_nothing
;
1078 ip_vs_protocol_init();
1080 ret
= ip_vs_app_init();
1082 IP_VS_ERR("can't setup application helper.\n");
1083 goto cleanup_protocol
;
1086 ret
= ip_vs_conn_init();
1088 IP_VS_ERR("can't setup connection table.\n");
1092 ret
= nf_register_hooks(ip_vs_ops
, ARRAY_SIZE(ip_vs_ops
));
1094 IP_VS_ERR("can't register hooks.\n");
1098 IP_VS_INFO("ipvs loaded.\n");
1102 ip_vs_conn_cleanup();
1104 ip_vs_app_cleanup();
1106 ip_vs_protocol_cleanup();
1107 ip_vs_control_cleanup();
1112 static void __exit
ip_vs_cleanup(void)
1114 nf_unregister_hooks(ip_vs_ops
, ARRAY_SIZE(ip_vs_ops
));
1115 ip_vs_conn_cleanup();
1116 ip_vs_app_cleanup();
1117 ip_vs_protocol_cleanup();
1118 ip_vs_control_cleanup();
1119 IP_VS_INFO("ipvs unloaded.\n");
1122 module_init(ip_vs_init
);
1123 module_exit(ip_vs_cleanup
);
1124 MODULE_LICENSE("GPL");