ipvs: Pass ipvs not net to ip_vs_estimator_net_init and ip_vs_estimator_cleanup
[deliverable/linux.git] / net / netfilter / ipvs / ip_vs_proto_udp.c
1 /*
2 * ip_vs_proto_udp.c: UDP load balancing support for IPVS
3 *
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
13 * Network name space (netns) aware.
14 *
15 */
16
17 #define KMSG_COMPONENT "IPVS"
18 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19
20 #include <linux/in.h>
21 #include <linux/ip.h>
22 #include <linux/kernel.h>
23 #include <linux/netfilter.h>
24 #include <linux/netfilter_ipv4.h>
25 #include <linux/udp.h>
26
27 #include <net/ip_vs.h>
28 #include <net/ip.h>
29 #include <net/ip6_checksum.h>
30
31 static int
32 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
33 int *verdict, struct ip_vs_conn **cpp,
34 struct ip_vs_iphdr *iph)
35 {
36 struct net *net;
37 struct netns_ipvs *ipvs;
38 struct ip_vs_service *svc;
39 struct udphdr _udph, *uh;
40 __be16 _ports[2], *ports = NULL;
41
42 if (likely(!ip_vs_iph_icmp(iph))) {
43 /* IPv6 fragments, only first fragment will hit this */
44 uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
45 if (uh)
46 ports = &uh->source;
47 } else {
48 ports = skb_header_pointer(
49 skb, iph->len, sizeof(_ports), &_ports);
50 }
51
52 if (!ports) {
53 *verdict = NF_DROP;
54 return 0;
55 }
56
57 net = skb_net(skb);
58 ipvs = net_ipvs(net);
59 rcu_read_lock();
60 if (likely(!ip_vs_iph_inverse(iph)))
61 svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
62 &iph->daddr, ports[1]);
63 else
64 svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
65 &iph->saddr, ports[0]);
66
67 if (svc) {
68 int ignored;
69
70 if (ip_vs_todrop(net_ipvs(net))) {
71 /*
72 * It seems that we are very loaded.
73 * We have to drop this packet :(
74 */
75 rcu_read_unlock();
76 *verdict = NF_DROP;
77 return 0;
78 }
79
80 /*
81 * Let the virtual server select a real server for the
82 * incoming connection, and create a connection entry.
83 */
84 *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
85 if (!*cpp && ignored <= 0) {
86 if (!ignored)
87 *verdict = ip_vs_leave(svc, skb, pd, iph);
88 else
89 *verdict = NF_DROP;
90 rcu_read_unlock();
91 return 0;
92 }
93 }
94 rcu_read_unlock();
95 /* NF_ACCEPT */
96 return 1;
97 }
98
99
/*
 * Incrementally update the UDP checksum after only the address and a
 * port have been rewritten (RFC 1624 style differential update; the
 * payload is not re-walked).  A result of 0 is folded to
 * CSUM_MANGLED_0, because a zero checksum means "no checksum" in UDP.
 */
static inline void
udp_fast_csum_update(int af, struct udphdr *uhdr,
		     const union nf_inet_addr *oldip,
		     const union nf_inet_addr *newip,
		     __be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		uhdr->check =
			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(uhdr->check))));
	else
#endif
		uhdr->check =
			csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
					 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(uhdr->check))));
	/* 0 would be read as "checksum absent"; transmit 0xffff instead */
	if (!uhdr->check)
		uhdr->check = CSUM_MANGLED_0;
}
121
/*
 * Differentially update a CHECKSUM_PARTIAL pseudo-header checksum when
 * the address and the length change.  Unlike udp_fast_csum_update()
 * the stored value here is the unfolded pseudo-header sum that the NIC
 * will complete, so no CSUM_MANGLED_0 folding is applied.
 */
static inline void
udp_partial_csum_update(int af, struct udphdr *uhdr,
			const union nf_inet_addr *oldip,
			const union nf_inet_addr *newip,
			__be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		uhdr->check =
			~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(uhdr->check))));
	else
#endif
		uhdr->check =
			~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
					ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(uhdr->check))));
}
141
142
/*
 * Source-NAT an outgoing (server->client) UDP packet: rewrite the
 * source port to the virtual port and adjust the UDP checksum by the
 * cheapest applicable method (offload fixup, incremental update or a
 * full recompute).  Returns 1 to continue, 0 to drop the packet.
 */
static int
udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct udphdr *udph;
	unsigned int udphoff = iph->len;
	int oldlen;
	int payload_csum = 0;

#ifdef CONFIG_IP_VS_IPV6
	/* Non-first IPv6 fragments carry no UDP header; nothing to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - udphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Call application helper if needed
		 */
		if (!(ret = ip_vs_app_pkt_out(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - udphoff;
		else
			payload_csum = 1;
	}

	udph = (void *)skb_network_header(skb) + udphoff;
	udph->source = cp->vport;

	/*
	 *	Adjust UDP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Device will finish the csum; fix the partial sum only */
		udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
					htons(oldlen),
					htons(skb->len - udphoff));
	} else if (!payload_csum && (udph->check != 0)) {
		/* Only port and addr are changed, do fast csum update */
		udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
				     cp->dport, cp->vport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		udph->check = 0;
		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			udph->check = csum_ipv6_magic(&cp->vaddr.in6,
						      &cp->caddr.in6,
						      skb->len - udphoff,
						      cp->protocol, skb->csum);
		else
#endif
			udph->check = csum_tcpudp_magic(cp->vaddr.ip,
							cp->caddr.ip,
							skb->len - udphoff,
							cp->protocol,
							skb->csum);
		/* 0 means "no checksum" for UDP; transmit 0xffff instead */
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			  pp->name, udph->check,
			  (char*)&(udph->check) - (char*)udph);
	}
	return 1;
}
224
225
/*
 * Destination-NAT an incoming (client->server) UDP packet: rewrite the
 * destination port to the real server's port and adjust the UDP
 * checksum.  Mirror image of udp_snat_handler().  Returns 1 to
 * continue, 0 to drop the packet.
 */
static int
udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
		 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
{
	struct udphdr *udph;
	unsigned int udphoff = iph->len;
	int oldlen;
	int payload_csum = 0;

#ifdef CONFIG_IP_VS_IPV6
	/* Non-first IPv6 fragments carry no UDP header; nothing to mangle */
	if (cp->af == AF_INET6 && iph->fragoffs)
		return 1;
#endif
	oldlen = skb->len - udphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Attempt ip_vs_app call.
		 *	It will fix ip_vs_conn
		 */
		if (!(ret = ip_vs_app_pkt_in(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - udphoff;
		else
			payload_csum = 1;
	}

	udph = (void *)skb_network_header(skb) + udphoff;
	udph->dest = cp->dport;

	/*
	 *	Adjust UDP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Device will finish the csum; fix the partial sum only */
		udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
					htons(oldlen),
					htons(skb->len - udphoff));
	} else if (!payload_csum && (udph->check != 0)) {
		/* Only port and addr are changed, do fast csum update */
		udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
				     cp->vport, cp->dport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		udph->check = 0;
		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			udph->check = csum_ipv6_magic(&cp->caddr.in6,
						      &cp->daddr.in6,
						      skb->len - udphoff,
						      cp->protocol, skb->csum);
		else
#endif
			udph->check = csum_tcpudp_magic(cp->caddr.ip,
							cp->daddr.ip,
							skb->len - udphoff,
							cp->protocol,
							skb->csum);
		/* 0 means "no checksum" for UDP; transmit 0xffff instead */
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return 1;
}
305
306
/*
 * Verify the UDP checksum of a received packet, if one is present.
 * Returns 1 when the checksum is valid (or absent, which is legal for
 * UDP over IPv4), 0 when verification fails or the header is missing.
 */
static int
udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
	struct udphdr _udph, *uh;
	unsigned int udphoff;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		udphoff = sizeof(struct ipv6hdr);
	else
#endif
		udphoff = ip_hdrlen(skb);

	uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
	if (uh == NULL)
		return 0;

	/* check == 0 means the sender supplied no checksum: accept */
	if (uh->check != 0) {
		switch (skb->ip_summed) {
		case CHECKSUM_NONE:
			skb->csum = skb_checksum(skb, udphoff,
						 skb->len - udphoff, 0);
			/* fall through - verify the just-computed sum */
		case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
			if (af == AF_INET6) {
				if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						    &ipv6_hdr(skb)->daddr,
						    skb->len - udphoff,
						    ipv6_hdr(skb)->nexthdr,
						    skb->csum)) {
					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
							 "Failed checksum for");
					return 0;
				}
			} else
#endif
				if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
						      ip_hdr(skb)->daddr,
						      skb->len - udphoff,
						      ip_hdr(skb)->protocol,
						      skb->csum)) {
					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
							 "Failed checksum for");
					return 0;
				}
			break;
		default:
			/* No need to checksum. */
			break;
		}
	}
	return 1;
}
360
361 static inline __u16 udp_app_hashkey(__be16 port)
362 {
363 return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
364 & UDP_APP_TAB_MASK;
365 }
366
367
368 static int udp_register_app(struct net *net, struct ip_vs_app *inc)
369 {
370 struct ip_vs_app *i;
371 __u16 hash;
372 __be16 port = inc->port;
373 int ret = 0;
374 struct netns_ipvs *ipvs = net_ipvs(net);
375 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
376
377 hash = udp_app_hashkey(port);
378
379 list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
380 if (i->port == port) {
381 ret = -EEXIST;
382 goto out;
383 }
384 }
385 list_add_rcu(&inc->p_list, &ipvs->udp_apps[hash]);
386 atomic_inc(&pd->appcnt);
387
388 out:
389 return ret;
390 }
391
392
393 static void
394 udp_unregister_app(struct net *net, struct ip_vs_app *inc)
395 {
396 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net_ipvs(net), IPPROTO_UDP);
397
398 atomic_dec(&pd->appcnt);
399 list_del_rcu(&inc->p_list);
400 }
401
402
/*
 * Bind a new NAT connection to the application incarnation registered
 * on its virtual port, if any, and run the app's init_conn hook.
 * Returns the hook's result, or 0 when no app applies.
 */
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
	struct netns_ipvs *ipvs = cp->ipvs;
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;

	/* Lookup application incarnations and bind the right one */
	hash = udp_app_hashkey(cp->vport);

	rcu_read_lock();
	list_for_each_entry_rcu(inc, &ipvs->udp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			/* Reference on inc taken: safe to leave the RCU
			 * read section before using it further.  Note the
			 * unlock here pairs with the lock above; the path
			 * through "goto out" must not unlock again.
			 */
			rcu_read_unlock();

			IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
				      "%s:%u to app %s on port %u\n",
				      __func__,
				      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
				      ntohs(cp->cport),
				      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
				      ntohs(cp->vport),
				      inc->name, ntohs(inc->port));

			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	rcu_read_unlock();

out:
	return result;
}
444
445
/* Per-state connection timeouts in jiffies, indexed by IP_VS_UDP_S_*.
 * Copied per-netns into pd->timeout_table by __udp_init().
 */
static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
	[IP_VS_UDP_S_NORMAL]		=	5*60*HZ,
	[IP_VS_UDP_S_LAST]		=	2*HZ,
};

/* Human-readable state names for /proc output and debugging */
static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
	[IP_VS_UDP_S_NORMAL]		=	"UDP",
	[IP_VS_UDP_S_LAST]		=	"BUG!",
};
455
456 static const char * udp_state_name(int state)
457 {
458 if (state >= IP_VS_UDP_S_LAST)
459 return "ERR!";
460 return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
461 }
462
463 static void
464 udp_state_transition(struct ip_vs_conn *cp, int direction,
465 const struct sk_buff *skb,
466 struct ip_vs_proto_data *pd)
467 {
468 if (unlikely(!pd)) {
469 pr_err("UDP no ns data\n");
470 return;
471 }
472
473 cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
474 }
475
476 static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
477 {
478 struct netns_ipvs *ipvs = net_ipvs(net);
479
480 ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
481 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
482 sizeof(udp_timeouts));
483 if (!pd->timeout_table)
484 return -ENOMEM;
485 return 0;
486 }
487
/* Per-netns teardown: free the timeout table allocated by __udp_init() */
static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
	kfree(pd->timeout_table);
}
492
493
/* UDP transport descriptor registered with the IPVS protocol core;
 * init/exit are per-netns (init_netns/exit_netns), not global.
 */
struct ip_vs_protocol ip_vs_protocol_udp = {
	.name =			"UDP",
	.protocol =		IPPROTO_UDP,
	.num_states =		IP_VS_UDP_S_LAST,
	.dont_defrag =		0,
	.init =			NULL,
	.exit =			NULL,
	.init_netns =		__udp_init,
	.exit_netns =		__udp_exit,
	.conn_schedule =	udp_conn_schedule,
	.conn_in_get =		ip_vs_conn_in_get_proto,
	.conn_out_get =		ip_vs_conn_out_get_proto,
	.snat_handler =		udp_snat_handler,
	.dnat_handler =		udp_dnat_handler,
	.csum_check =		udp_csum_check,
	.state_transition =	udp_state_transition,
	.state_name =		udp_state_name,
	.register_app =		udp_register_app,
	.unregister_app =	udp_unregister_app,
	.app_conn_bind =	udp_app_conn_bind,
	.debug_packet =		ip_vs_tcpudp_debug_packet,
	.timeout_change =	NULL,
};
This page took 0.041017 seconds and 5 git commands to generate.