ipvs: support scheduling inverse and icmp UDP packets
[deliverable/linux.git] / net / netfilter / ipvs / ip_vs_proto_udp.c
1 /*
2 * ip_vs_proto_udp.c: UDP load balancing support for IPVS
3 *
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
13 * Network name space (netns) aware.
14 *
15 */
16
17 #define KMSG_COMPONENT "IPVS"
18 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19
20 #include <linux/in.h>
21 #include <linux/ip.h>
22 #include <linux/kernel.h>
23 #include <linux/netfilter.h>
24 #include <linux/netfilter_ipv4.h>
25 #include <linux/udp.h>
26
27 #include <net/ip_vs.h>
28 #include <net/ip.h>
29 #include <net/ip6_checksum.h>
30
/*
 * Schedule a new IPVS connection for a UDP packet.
 *
 * Finds the matching virtual service — by destination address/port for
 * a normal packet, or by source address/port when doing an inverse
 * lookup for a UDP header embedded in an ICMP error — and asks it to
 * select a real server and create a connection entry.
 *
 * Returns 1 to continue packet processing (NF_ACCEPT), or 0 with
 * *verdict set when IPVS has already decided the packet's fate.
 */
static int
udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
		  int *verdict, struct ip_vs_conn **cpp,
		  struct ip_vs_iphdr *iph)
{
	struct net *net;
	struct ip_vs_service *svc;
	struct udphdr _udph, *uh;
	/* ports[0] = source port, ports[1] = destination port */
	__be16 _ports[2], *ports = NULL;

	if (likely(!ip_vs_iph_icmp(iph))) {
		/* IPv6 fragments, only first fragment will hit this */
		uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
		if (uh)
			ports = &uh->source;
	} else {
		/* ICMP-embedded header: only the two port fields are read */
		ports = skb_header_pointer(
			skb, iph->len, sizeof(_ports), &_ports);
	}

	if (!ports) {
		*verdict = NF_DROP;
		return 0;
	}

	net = skb_net(skb);
	rcu_read_lock();
	if (likely(!ip_vs_iph_inverse(iph)))
		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
					 &iph->daddr, ports[1]);
	else
		/* inverse lookup: match on the original destination, which
		 * appears as the source in the embedded header */
		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
					 &iph->saddr, ports[0]);

	if (svc) {
		int ignored;

		if (ip_vs_todrop(net_ipvs(net))) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			rcu_read_unlock();
			*verdict = NF_DROP;
			return 0;
		}

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
		if (!*cpp && ignored <= 0) {
			if (!ignored)
				*verdict = ip_vs_leave(svc, skb, pd, iph);
			else
				*verdict = NF_DROP;
			rcu_read_unlock();
			return 0;
		}
	}
	rcu_read_unlock();
	/* NF_ACCEPT */
	return 1;
}
96
97
/*
 * Incrementally update the UDP checksum after rewriting one address and
 * one port (RFC 1624 style), avoiding a full recomputation.  Only valid
 * when the packet carried a real (non-zero) checksum.
 */
static inline void
udp_fast_csum_update(int af, struct udphdr *uhdr,
		     const union nf_inet_addr *oldip,
		     const union nf_inet_addr *newip,
		     __be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		uhdr->check =
			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(uhdr->check))));
	else
#endif
		uhdr->check =
			csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
					 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(uhdr->check))));
	/* A zero UDP checksum means "no checksum" (RFC 768); use the
	 * alternate all-ones representation of zero instead. */
	if (!uhdr->check)
		uhdr->check = CSUM_MANGLED_0;
}
119
/*
 * Adjust the pseudo-header checksum stored in a CHECKSUM_PARTIAL skb
 * after the address and/or UDP length changed.  Hardware (or the stack)
 * finishes the real checksum later, so no CSUM_MANGLED_0 fixup and no
 * final fold are needed here — note the result stays complemented.
 */
static inline void
udp_partial_csum_update(int af, struct udphdr *uhdr,
			const union nf_inet_addr *oldip,
			const union nf_inet_addr *newip,
			__be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		uhdr->check =
			~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(uhdr->check))));
	else
#endif
		uhdr->check =
			~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
					ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(uhdr->check))));
}
139
140
/*
 * Source NAT for outgoing (real server -> client) UDP packets: rewrite
 * the source port to the virtual port, let an application helper mangle
 * the payload if one is bound, and fix up the UDP checksum by the
 * cheapest applicable method.
 *
 * Returns 1 on success, 0 when the packet must be dropped.
 */
static int
udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct udphdr *udph;
	unsigned int udphoff = iph->len;
	int oldlen;
	int payload_csum = 0;

#ifdef CONFIG_IP_VS_IPV6
	/* Non-first IPv6 fragments carry no UDP header; pass them through */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - udphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Call application helper if needed
		 */
		if (!(ret = ip_vs_app_pkt_out(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - udphoff;
		else
			payload_csum = 1;
	}

	udph = (void *)skb_network_header(skb) + udphoff;
	udph->source = cp->vport;

	/*
	 *	Adjust UDP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Hardware computes the final csum; only the pseudo-header
		 * delta for the new address/length is needed */
		udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
					htons(oldlen),
					htons(skb->len - udphoff));
	} else if (!payload_csum && (udph->check != 0)) {
		/* Only port and addr are changed, do fast csum update */
		udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
				     cp->dport, cp->vport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		udph->check = 0;
		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			udph->check = csum_ipv6_magic(&cp->vaddr.in6,
						      &cp->caddr.in6,
						      skb->len - udphoff,
						      cp->protocol, skb->csum);
		else
#endif
			udph->check = csum_tcpudp_magic(cp->vaddr.ip,
							cp->caddr.ip,
							skb->len - udphoff,
							cp->protocol,
							skb->csum);
		/* zero means "no checksum" for UDP; encode zero as 0xffff */
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			  pp->name, udph->check,
			  (char*)&(udph->check) - (char*)udph);
	}
	return 1;
}
222
223
/*
 * Destination NAT for incoming (client -> virtual service) UDP packets:
 * rewrite the destination port to the real server port, let a bound
 * application helper mangle the payload, and fix up the UDP checksum.
 * Mirrors udp_snat_handler() with the address/port roles swapped.
 *
 * Returns 1 on success, 0 when the packet must be dropped.
 */
static int
udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct udphdr *udph;
	unsigned int udphoff = iph->len;
	int oldlen;
	int payload_csum = 0;

#ifdef CONFIG_IP_VS_IPV6
	/* Non-first IPv6 fragments carry no UDP header; pass them through */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - udphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Attempt ip_vs_app call.
		 *	It will fix ip_vs_conn
		 */
		if (!(ret = ip_vs_app_pkt_in(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - udphoff;
		else
			payload_csum = 1;
	}

	udph = (void *)skb_network_header(skb) + udphoff;
	udph->dest = cp->dport;

	/*
	 *	Adjust UDP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Hardware computes the final csum; only the pseudo-header
		 * delta for the new address/length is needed */
		udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
					htons(oldlen),
					htons(skb->len - udphoff));
	} else if (!payload_csum && (udph->check != 0)) {
		/* Only port and addr are changed, do fast csum update */
		udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
				     cp->vport, cp->dport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		udph->check = 0;
		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			udph->check = csum_ipv6_magic(&cp->caddr.in6,
						      &cp->daddr.in6,
						      skb->len - udphoff,
						      cp->protocol, skb->csum);
		else
#endif
			udph->check = csum_tcpudp_magic(cp->caddr.ip,
							cp->daddr.ip,
							skb->len - udphoff,
							cp->protocol,
							skb->csum);
		/* zero means "no checksum" for UDP; encode zero as 0xffff */
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return 1;
}
303
304
/*
 * Verify the UDP checksum of a packet.  A zero checksum field means
 * "checksum absent" for UDP over IPv4 and is accepted as-is.
 *
 * Returns 1 when the packet is acceptable, 0 when it must be dropped.
 */
static int
udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
	struct udphdr _udph, *uh;
	unsigned int udphoff;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		udphoff = sizeof(struct ipv6hdr);
	else
#endif
		udphoff = ip_hdrlen(skb);

	uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
	if (uh == NULL)
		return 0;

	if (uh->check != 0) {
		switch (skb->ip_summed) {
		case CHECKSUM_NONE:
			skb->csum = skb_checksum(skb, udphoff,
						 skb->len - udphoff, 0);
			/* fall through: verify the csum computed above */
		case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
			if (af == AF_INET6) {
				if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						    &ipv6_hdr(skb)->daddr,
						    skb->len - udphoff,
						    ipv6_hdr(skb)->nexthdr,
						    skb->csum)) {
					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
							 "Failed checksum for");
					return 0;
				}
			} else
#endif
				if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
						      ip_hdr(skb)->daddr,
						      skb->len - udphoff,
						      ip_hdr(skb)->protocol,
						      skb->csum)) {
					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
							 "Failed checksum for");
					return 0;
				}
			break;
		default:
			/* No need to checksum. */
			break;
		}
	}
	return 1;
}
358
359 static inline __u16 udp_app_hashkey(__be16 port)
360 {
361 return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
362 & UDP_APP_TAB_MASK;
363 }
364
365
/*
 * Register an application helper incarnation on its UDP port.
 *
 * Adds @inc to the per-netns hash of UDP app helpers and bumps the
 * protocol's helper count.  Returns 0 on success, or -EEXIST when
 * another helper is already bound to the same port.
 */
static int udp_register_app(struct net *net, struct ip_vs_app *inc)
{
	struct ip_vs_app *i;
	__u16 hash;
	__be16 port = inc->port;
	int ret = 0;
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);

	hash = udp_app_hashkey(port);

	/* reject duplicate registrations for the same port */
	list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
		if (i->port == port) {
			ret = -EEXIST;
			goto out;
		}
	}
	/* RCU insertion pairs with list_for_each_entry_rcu() readers */
	list_add_rcu(&inc->p_list, &ipvs->udp_apps[hash]);
	atomic_inc(&pd->appcnt);

  out:
	return ret;
}
389
390
/*
 * Remove a previously registered application helper incarnation and
 * drop the protocol's helper count.  The RCU list removal pairs with
 * list_add_rcu() in udp_register_app().
 */
static void
udp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);

	atomic_dec(&pd->appcnt);
	list_del_rcu(&inc->p_list);
}
399
400
/*
 * Bind a new connection to a matching application helper, if any.
 *
 * Only NAT (masquerading) connections are considered, since only those
 * have their payload rewritten by a helper.  Returns the helper's
 * init_conn() result, or 0 when no helper matched.
 */
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;

	/* Lookup application incarnations and bind the right one */
	hash = udp_app_hashkey(cp->vport);

	rcu_read_lock();
	list_for_each_entry_rcu(inc, &ipvs->udp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			/* reference taken above keeps inc alive, so the
			 * RCU read section can end before using it */
			rcu_read_unlock();

			IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
				      "%s:%u to app %s on port %u\n",
				      __func__,
				      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
				      ntohs(cp->cport),
				      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
				      ntohs(cp->vport),
				      inc->name, ntohs(inc->port));

			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	rcu_read_unlock();

  out:
	return result;
}
442
443
/* Per-state connection timeouts in jiffies; UDP has only the NORMAL
 * state, the LAST slot exists as an array bound / sentinel. */
static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
	[IP_VS_UDP_S_NORMAL]		=	5*60*HZ,
	[IP_VS_UDP_S_LAST]		=	2*HZ,
};

/* Printable names for the states above, indexed by state number */
static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
	[IP_VS_UDP_S_NORMAL]		=	"UDP",
	[IP_VS_UDP_S_LAST]		=	"BUG!",
};
453
454 static const char * udp_state_name(int state)
455 {
456 if (state >= IP_VS_UDP_S_LAST)
457 return "ERR!";
458 return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
459 }
460
461 static void
462 udp_state_transition(struct ip_vs_conn *cp, int direction,
463 const struct sk_buff *skb,
464 struct ip_vs_proto_data *pd)
465 {
466 if (unlikely(!pd)) {
467 pr_err("UDP no ns data\n");
468 return;
469 }
470
471 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
472 }
473
/*
 * Per-netns initialisation: set up the application-helper hash table
 * and allocate the per-netns copy of the state timeout table.
 * Returns 0 on success or -ENOMEM.
 */
static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
							sizeof(udp_timeouts));
	if (!pd->timeout_table)
		return -ENOMEM;
	return 0;
}
485
/* Per-netns teardown: free the timeout table allocated in __udp_init(). */
static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
	kfree(pd->timeout_table);
}
490
491
/*
 * Protocol descriptor registered with the IPVS core: wires the UDP
 * handlers above into the generic connection/NAT/checksum framework.
 */
struct ip_vs_protocol ip_vs_protocol_udp = {
	.name =			"UDP",
	.protocol =		IPPROTO_UDP,
	.num_states =		IP_VS_UDP_S_LAST,
	.dont_defrag =		0,
	.init =			NULL,
	.exit =			NULL,
	.init_netns =		__udp_init,
	.exit_netns =		__udp_exit,
	.conn_schedule =	udp_conn_schedule,
	.conn_in_get =		ip_vs_conn_in_get_proto,
	.conn_out_get =		ip_vs_conn_out_get_proto,
	.snat_handler =		udp_snat_handler,
	.dnat_handler =		udp_dnat_handler,
	.csum_check =		udp_csum_check,
	.state_transition =	udp_state_transition,
	.state_name =		udp_state_name,
	.register_app =		udp_register_app,
	.unregister_app =	udp_unregister_app,
	.app_conn_bind =	udp_app_conn_bind,
	.debug_packet =		ip_vs_tcpudp_debug_packet,
	.timeout_change =	NULL,
};
This page took 0.071056 seconds and 6 git commands to generate.