/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen              Make it work with multiple hosts.
 *                              More RFC compliance.
 *
 *      Horst von Brand         Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))
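
/* Per-fragment reassembly state is kept in the skb control buffer:
 * FRAG6_CB(skb)->offset records where this fragment's payload starts
 * within the original datagram.  struct inet6_skb_parm stays first so
 * that the cb area remains usable through IP6CB() as well.
 */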


/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id          */
        u32                     user;
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;
        unsigned int            csum;
        __u16                   nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
        return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
        return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside of
 * ip6_frags.lock, as doing so could race with the hash secret
 * (ip6_frags.rnd) being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
                             const struct in6_addr *daddr, u32 rnd)
{
        u32 a, b, c;

        a = (__force u32)saddr->s6_addr32[0];
        b = (__force u32)saddr->s6_addr32[1];
        c = (__force u32)saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)saddr->s6_addr32[3];
        b += (__force u32)daddr->s6_addr32[0];
        c += (__force u32)daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += (__force u32)daddr->s6_addr32[2];
        b += (__force u32)daddr->s6_addr32[3];
        c += (__force u32)id;
        __jhash_mix(a, b, c);

        return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);
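
/* The lookup hash mixes the source address, destination address and
 * fragment id through three rounds of __jhash_mix(), seeded with the
 * hash secret in ip6_frags.rnd, and then masks the result down to an
 * index into the INETFRAGS_HASHSZ buckets used by the generic
 * inet_frag code.
 */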

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq;
        struct ip6_create_arg *arg = a;

        fq = container_of(q, struct frag_queue, q);
        return (fq->id == arg->id && fq->user == arg->user &&
                ipv6_addr_equal(&fq->saddr, arg->src) &&
                ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);
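
/* A reassembly queue is identified by the tuple
 * (fragment id, user, source address, destination address); fragments
 * are considered part of the same datagram only if all four match.
 */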

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
                                  struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &nf->mem);
        kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
        struct ip6_create_arg *arg = a;

        fq->id = arg->id;
        fq->user = arg->user;
        ipv6_addr_copy(&fq->saddr, arg->src);
        ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
        inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill the fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
        int evicted;

        evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
        if (evicted)
                IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}
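
/* The evictor runs from the receive path once the per-namespace
 * fragment memory crosses the configured high threshold.  The generic
 * inet_frag_evictor() is expected to drop queues in LRU order until
 * usage falls back below the low threshold; the number of queues it
 * discarded is accounted above as reassembly failures.
 */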

static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net_device *dev = NULL;
        struct net *net;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        fq_kill(fq);

        net = container_of(fq->q.net, struct net, ipv6.frags);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, fq->iif);
        if (!dev)
                goto out_rcu_unlock;

        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
                goto out_rcu_unlock;

        /*
         * Use the device on which the last segment arrived as the
         * source device for the ICMP error. Do not dereference a cached
         * fq->dev pointer directly; the device may already have
         * disappeared.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out_rcu_unlock:
        rcu_read_unlock();
out:
        spin_unlock(&fq->q.lock);
        fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        unsigned int hash;

        arg.id = id;
        arg.user = IP6_DEFRAG_LOCAL_DELIVER;
        arg.src = src;
        arg.dst = dst;

        read_lock(&ip6_frags.lock);
        hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
        if (q == NULL)
                return NULL;

        return container_of(q, struct frag_queue, q);
}
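
/* fq_find() is a lookup-or-create helper: inet_frag_find() scans the
 * hash bucket computed above for a queue matching the (id, src, dst)
 * key and, if none exists yet, allocates a new one initialised through
 * ip6_frag_init().  The returned queue carries a reference that the
 * caller must release with fq_put().
 */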

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                          struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;
        struct net *net = dev_net(skb_dst(skb)->dev);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
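
        /* The fragment offset is carried in the upper 13 bits of
         * fhdr->frag_off in units of 8 octets, so masking off the low
         * three flag bits of the host-order value yields the byte
         * offset directly.  'end' is that offset plus this fragment's
         * payload length, i.e. the IPv6 payload length minus the
         * extension headers (including the fragment header itself)
         * that precede the fragmentable part.
         */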

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far. We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for(next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one. Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* The old fragment is completely overlapped by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->q.fragments = next;

                        fq->q.meat -= free_it->len;
                        frag_kfree_skb(fq->q.net, free_it, NULL);
                }
        }
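
        /* Note that overlapping data is trimmed or dropped fragment by
         * fragment here rather than discarding the whole reassembly
         * queue; RFC 5722 later tightened this for IPv6 by requiring
         * that datagrams containing overlapping fragments be dropped
         * entirely.
         */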

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        atomic_add(skb->truesize, &fq->q.net->mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }

        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len)
                return ip6_frag_reasm(fq, prev, dev);

        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
        write_unlock(&ip6_frags.lock);
        return -1;

err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

/*
 * Check if this packet is complete.
 *
 * Returns -1 on failure for any reason, and 1 once the datagram has
 * been successfully reassembled into the first skb (with IP6CB nhoff
 * pointing at the current nexthdr field).
 *
 * It is called with the fq locked, and the caller must have checked
 * that the queue is eligible for reassembly, i.e. it is not COMPLETE,
 * the first and last fragments have arrived, and all the data is here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
        struct sk_buff *fp, *head = fq->q.fragments;
        int payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                kfree_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        WARN_ON(head == NULL);
        WARN_ON(FRAG6_CB(head)->offset != 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first holding the data and paged part,
         * and the second holding only the fragment list. */
        if (skb_has_frags(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i=0; i<skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &fq->q.net->mem);
        }

        /* We have to remove the fragment header from the datagram and
         * relocate the preceding headers in order to calculate the ICV
         * correctly. */
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);
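
        /* The block above rewrites the Next Header byte of the header
         * that pointed at the fragment header (at offset nhoff from the
         * network header) with the fragment header's own nexthdr value,
         * then slides all preceding header bytes forward by
         * sizeof(struct frag_hdr) (8 bytes), so the reassembled packet
         * no longer carries a fragment header at all.
         */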

        skb_shinfo(head)->frag_list = head->next;
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &fq->q.net->mem);

        for (fp=head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &fq->q.net->mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net = dev_net(skb_dst(skb)->dev);

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len==0)
                goto fail_hdr;

        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr))))
                goto fail_hdr;

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }
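
        /* 0xFFF9 covers the 13-bit fragment offset (0xFFF8) plus the
         * M ("more fragments") flag (0x0001).  When none of those bits
         * are set, the branch above accepts the packet as-is: it is a
         * lone fragment carrying a complete datagram, so the fragment
         * header is simply skipped and processing continues at the
         * next header.
         */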

        if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
                ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;

fail_hdr:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
        return -1;
}

static const struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
        {
                .procname       = "ip6frag_high_thresh",
                .data           = &init_net.ipv6.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_low_thresh",
                .data           = &init_net.ipv6.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_time",
                .data           = &init_net.ipv6.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table ip6_frags_ctl_table[] = {
        {
                .procname       = "ip6frag_secret_interval",
                .data           = &ip6_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};
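
/* The ip6frag_high_thresh, ip6frag_low_thresh and ip6frag_time entries
 * above are registered per network namespace, with their data pointers
 * rewritten to the namespace's own copies in
 * ip6_frags_ns_sysctl_register(); ip6frag_secret_interval drives the
 * global hash-secret rekeying timer and is registered only once, via
 * ip6_frags_sysctl_register() below.
 */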

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip6_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv6.frags.high_thresh;
                table[1].data = &net->ipv6.frags.low_thresh;
                table[2].data = &net->ipv6.frags.timeout;
        }

        hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv6.sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
        ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
                                                     ip6_frags_ctl_table);
        return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
        unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
        return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
        return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
        net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

        inet_frags_init_net(&net->ipv6.frags);

        return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
        ip6_frags_ns_sysctl_unregister(net);
        inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
        .init = ipv6_frags_init_net,
        .exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
        int ret;

        ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        if (ret)
                goto out;

        ret = ip6_frags_sysctl_register();
        if (ret)
                goto err_sysctl;

        ret = register_pernet_subsys(&ip6_frags_ops);
        if (ret)
                goto err_pernet;

        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.constructor = ip6_frag_init;
        ip6_frags.destructor = NULL;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
        ip6_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip6_frags);
out:
        return ret;

err_pernet:
        ip6_frags_sysctl_unregister();
err_sysctl:
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        goto out;
}

void ipv6_frag_exit(void)
{
        inet_frags_fini(&ip6_frags);
        ip6_frags_sysctl_unregister();
        unregister_pernet_subsys(&ip6_frags_ops);
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}