[INET]: Consolidate the xxx_frag_destroy
net/ipv6/reassembly.c
/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id */
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

struct inet_frags_ctl ip6_frags_ctl __read_mostly = {
	.high_thresh	 = 256 * 1024,
	.low_thresh	 = 192 * 1024,
	.timeout	 = IPV6_FRAG_TIMEOUT,
	.secret_interval = 10 * 60 * HZ,
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(void)
{
	return ip6_frags.nqueues;
}

int ip6_frag_mem(void)
{
	return atomic_read(&ip6_frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += ip6_frags.rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}

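The hash folds all 288 bits of key material (source address, destination address, fragment ID) into one bucket index over three rounds of Jenkins-style mixing, salting the first round with a periodically rekeyed random value (ip6_frags.rnd) so off-path attackers cannot precompute bucket collisions. Below is a minimal userspace sketch of the same discipline; the mix macro is reproduced from the classic lookup2-style hash that linux/jhash.h of this era implements, and names like HASHSZ and ip6qhash_demo are invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define JHASH_GOLDEN_RATIO	0x9e3779b9
#define HASHSZ			64	/* power of two, like INETFRAGS_HASHSZ */

#define __jhash_mix(a, b, c) \
{ \
	a -= b; a -= c; a ^= (c >> 13); \
	b -= c; b -= a; b ^= (a << 8);  \
	c -= a; c -= b; c ^= (b >> 13); \
	a -= b; a -= c; a ^= (c >> 12); \
	b -= c; b -= a; b ^= (a << 16); \
	c -= a; c -= b; c ^= (b >> 5);  \
	a -= b; a -= c; a ^= (c >> 3);  \
	b -= c; b -= a; b ^= (a << 10); \
	c -= a; c -= b; c ^= (b >> 15); \
}

/* Hash (saddr, daddr, id) into a bucket index, mirroring ip6qhashfn(). */
static unsigned int ip6qhash_demo(const uint32_t saddr[4],
				  const uint32_t daddr[4],
				  uint32_t id, uint32_t rnd)
{
	uint32_t a = saddr[0], b = saddr[1], c = saddr[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;			/* per-boot secret salt */
	__jhash_mix(a, b, c);

	a += saddr[3]; b += daddr[0]; c += daddr[1];
	__jhash_mix(a, b, c);

	a += daddr[2]; b += daddr[3]; c += id;
	__jhash_mix(a, b, c);

	return c & (HASHSZ - 1);
}

int main(void)
{
	uint32_t src[4] = { 0x20010db8, 0, 0, 1 };
	uint32_t dst[4] = { 0x20010db8, 0, 0, 2 };

	printf("bucket=%u\n", ip6qhash_demo(src, dst, 0x12345678, 0xdeadbeef));
	return 0;
}
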
static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip6_frags.mem);
	kfree_skb(skb);
}

static void ip6_frag_free(struct inet_frag_queue *fq)
{
	kfree(container_of(fq, struct frag_queue, q));
}

static inline struct frag_queue *frag_alloc_queue(void)
{
	struct frag_queue *fq = kzalloc(sizeof(struct frag_queue), GFP_ATOMIC);

	if (!fq)
		return NULL;
	atomic_add(sizeof(struct frag_queue), &ip6_frags.mem);
	return fq;
}

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq, int *work)
{
	if (atomic_dec_and_test(&fq->q.refcnt))
		inet_frag_destroy(&fq->q, &ip6_frags, work);
}

/* Kill fq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

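This is the point of the commit in the title: the old protocol-private ip6_frag_destroy() body now lives in the shared inet_frag_destroy() in net/ipv4/inet_fragment.c, so IPv4, IPv6, and netfilter reassembly free their queues through one path. The kernel helper's exact body is not shown here; the following is only a hedged userspace analog of what such a consolidated destroy is expected to do (walk the fragment chain, release each fragment, un-account its bytes, then invoke the protocol destructor). Every name below is illustrative:

#include <stdlib.h>

struct frag { struct frag *next; size_t truesize; void *data; };

struct frag_q {
	struct frag *fragments;
	void (*destructor)(struct frag_q *);
};

static size_t frags_mem;	/* stands in for ip6_frags.mem */

static void generic_frag_destroy(struct frag_q *q, int *work)
{
	struct frag *fp = q->fragments;

	while (fp) {
		struct frag *next = fp->next;

		if (work)
			*work -= (int)fp->truesize;
		frags_mem -= fp->truesize;	/* un-account the fragment */
		free(fp->data);
		free(fp);
		fp = next;
	}
	q->destructor(q);	/* protocol-specific part, e.g. ip6_frag_free */
}
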
static void ip6_evictor(struct inet6_dev *idev)
{
	struct frag_queue *fq;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip6_frags.mem) - ip6_frags_ctl.low_thresh;
	if (work <= 0)
		return;

	while (work > 0) {
		read_lock(&ip6_frags.lock);
		if (list_empty(&ip6_frags.lru_list)) {
			read_unlock(&ip6_frags.lock);
			return;
		}
		tmp = ip6_frags.lru_list.next;
		fq = list_entry(tmp, struct frag_queue, q.lru_list);
		atomic_inc(&fq->q.refcnt);
		read_unlock(&ip6_frags.lock);

		spin_lock(&fq->q.lock);
		if (!(fq->q.last_in & COMPLETE))
			fq_kill(fq);
		spin_unlock(&fq->q.lock);

		fq_put(fq, &work);
		IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
	}
}

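Eviction runs with hysteresis: ipv6_frag_rcv() triggers it once accounted memory exceeds high_thresh (256 KB), and it keeps reclaiming the oldest queues until usage drops to low_thresh (192 KB), so the reassembler does not thrash right at the limit. A minimal sketch of the same policy over a plain singly linked LRU list, with all names hypothetical:

#include <stddef.h>

struct lru_q { struct lru_q *next; int bytes; };

/* Reclaim oldest queues until usage falls to the low-water mark. */
static struct lru_q *evict_demo(struct lru_q *oldest, int *mem,
				int low_thresh)
{
	int work = *mem - low_thresh;	/* bytes we still must free */

	while (work > 0 && oldest) {
		struct lru_q *victim = oldest;

		oldest = victim->next;
		work -= victim->bytes;
		*mem -= victim->bytes;
		/* a real implementation would free victim here */
	}
	return oldest;
}
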
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq = (struct frag_queue *) data;
	struct net_device *dev = NULL;

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & COMPLETE)
		goto out;

	fq_kill(fq);

	dev = dev_get_by_index(&init_net, fq->iif);
	if (!dev)
		goto out;

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & FIRST_IN) || !fq->q.fragments)
		goto out;

	/* Use the device on which the last fragment arrived as the
	 * source, and do not use the fq->dev pointer directly: the
	 * device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
	if (dev)
		dev_put(dev);
	spin_unlock(&fq->q.lock);
	fq_put(fq, NULL);
}

/* Creation primitives. */

static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
{
	struct frag_queue *fq;
	unsigned int hash;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif

	write_lock(&ip6_frags.lock);
	hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
#ifdef CONFIG_SMP
	hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
		if (fq->id == fq_in->id &&
		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
			atomic_inc(&fq->q.refcnt);
			write_unlock(&ip6_frags.lock);
			fq_in->q.last_in |= COMPLETE;
			fq_put(fq_in, NULL);
			return fq;
		}
	}
#endif
	fq = fq_in;

	if (!mod_timer(&fq->q.timer, jiffies + ip6_frags_ctl.timeout))
		atomic_inc(&fq->q.refcnt);

	atomic_inc(&fq->q.refcnt);
	hlist_add_head(&fq->q.list, &ip6_frags.hash[hash]);
	INIT_LIST_HEAD(&fq->q.lru_list);
	list_add_tail(&fq->q.lru_list, &ip6_frags.lru_list);
	ip6_frags.nqueues++;
	write_unlock(&ip6_frags.lock);
	return fq;
}

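On SMP, fq_find() drops its read lock before calling into ip6_frag_create(), so another CPU may intern an identical queue in that window; hence the re-scan under the write lock above, which discards the loser's freshly built copy. A compact userspace analog of this double-checked insertion, using a pthread rwlock; all names are illustrative and the fragment is a sketch, not the kernel's code:

#include <pthread.h>

struct node { struct node *next; int key; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *bucket;

/* Insert 'fresh' unless an equal entry appeared while we were unlocked. */
static struct node *intern(struct node *fresh)
{
	struct node *n;

	pthread_rwlock_wrlock(&lock);
	for (n = bucket; n; n = n->next) {
		if (n->key == fresh->key) {
			/* somebody beat us to it; the kernel marks the
			 * loser COMPLETE and drops its reference */
			pthread_rwlock_unlock(&lock);
			return n;
		}
	}
	fresh->next = bucket;
	bucket = fresh;
	pthread_rwlock_unlock(&lock);
	return fresh;
}
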
static struct frag_queue *
ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
		struct inet6_dev *idev)
{
	struct frag_queue *fq;

	if ((fq = frag_alloc_queue()) == NULL)
		goto oom;

	fq->id = id;
	ipv6_addr_copy(&fq->saddr, src);
	ipv6_addr_copy(&fq->daddr, dst);

	init_timer(&fq->q.timer);
	fq->q.timer.function = ip6_frag_expire;
	fq->q.timer.data = (long) fq;
	spin_lock_init(&fq->q.lock);
	atomic_set(&fq->q.refcnt, 1);

	return ip6_frag_intern(fq);

oom:
	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
	return NULL;
}

static __inline__ struct frag_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
	struct inet6_dev *idev)
{
	struct frag_queue *fq;
	struct hlist_node *n;
	unsigned int hash;

	read_lock(&ip6_frags.lock);
	hash = ip6qhashfn(id, src, dst);
	hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
		if (fq->id == id &&
		    ipv6_addr_equal(src, &fq->saddr) &&
		    ipv6_addr_equal(dst, &fq->daddr)) {
			atomic_inc(&fq->q.refcnt);
			read_unlock(&ip6_frags.lock);
			return fq;
		}
	}
	read_unlock(&ip6_frags.lock);

	return ip6_frag_create(id, src, dst, idev);
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;

	if (fq->q.last_in & COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

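The Fragment header stores the 13-bit offset in units of 8 octets in the upper bits of frag_off, so masking off the low three bits of the host-order value recovers the byte offset; end is that offset plus however much payload follows the fragment header. A standalone sketch of the arithmetic, with made-up field values and assuming the fragment header directly follows the 40-byte IPv6 header (so the span between them is just the 8-byte fragment header):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* frag_off on the wire: offset 1504 bytes = 188 eight-byte units,
	 * i.e. 188 << 3, with the MF (more fragments) bit set. */
	uint16_t frag_off    = htons((188 << 3) | 1);
	uint16_t payload_len = htons(1240);	/* includes the 8-byte frag hdr */

	int offset = ntohs(frag_off) & ~0x7;		/* byte offset: 1504 */
	int mf     = ntohs(frag_off) & 1;		/* more fragments?   */
	int end    = offset + (ntohs(payload_len) - 8);	/* end of this piece */

	printf("offset=%d end=%d mf=%d\n", offset, end, mf);
	return 0;
}
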
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one. Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			if (end <= offset)
				goto err;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && FRAG6_CB(next)->offset < end) {
		int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG6_CB(next)->offset += i;	/* next fragment */
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by the
			 * new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

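Both overlap checks reduce to interval arithmetic on [offset, end): a positive i = prev_end - offset means the new fragment's head duplicates i bytes of its predecessor and gets trimmed forward, while successors whose start lies before the new end are either head-trimmed or dropped whole. A tiny self-contained model of the predecessor-trimming rule (names invented):

#include <stdio.h>

/* Trim a new fragment [*off, end) against a predecessor that already
 * covers bytes up to prev_end. Returns 0 if nothing is left of it. */
static int trim_against_prev(int prev_end, int *off, int end)
{
	int i = prev_end - *off;	/* overlap with the fragment before us */

	if (i > 0) {
		*off += i;		/* drop the duplicated head bytes */
		if (end <= *off)
			return 0;	/* fully covered: discard */
	}
	return 1;
}

int main(void)
{
	int off = 1000, end = 2000;

	if (trim_against_prev(1200, &off, end))
		printf("kept [%d, %d)\n", off, end);	/* kept [1200, 2000) */
	return 0;
}
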
	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &ip6_frags.mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= FIRST_IN;
	}

	if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

err:
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *	Returns 1 on success (the frame has been reassembled and
 *	IP6CB(head)->nhoff holds the offset of the current nexthdr
 *	field), and -1 on failure of any kind.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip6_frags.mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &ip6_frags.mem);

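Rather than copying the payload down, the memmove above slides everything in front of the payload up by 8 bytes, overwriting the fragment header, and then advances the mac/network header offsets; this is cheap because the region before the payload is small. A standalone byte-buffer sketch of the same move, with a layout invented for the demo:

#include <stdio.h>
#include <string.h>

enum { MAC_LEN = 14, IP6_LEN = 40, FRAG_LEN = 8, PAY_LEN = 16 };

int main(void)
{
	unsigned char buf[MAC_LEN + IP6_LEN + FRAG_LEN + PAY_LEN];
	int data_off = MAC_LEN + IP6_LEN + FRAG_LEN;	/* start of payload */
	int mac = 0, net = MAC_LEN;			/* header offsets   */

	memset(buf, 'M', MAC_LEN);			/* link-layer hdr  */
	memset(buf + net, '6', IP6_LEN);		/* IPv6 hdr        */
	memset(buf + net + IP6_LEN, 'F', FRAG_LEN);	/* fragment hdr    */
	memset(buf + data_off, 'P', PAY_LEN);		/* payload         */

	/* Slide everything in front of the payload up by FRAG_LEN bytes,
	 * clobbering the fragment header, then bump the offsets. */
	memmove(buf + FRAG_LEN, buf, data_off - FRAG_LEN);
	mac += FRAG_LEN;
	net += FRAG_LEN;

	/* The IPv6 header now ends exactly where the payload begins. */
	printf("net hdr at %d..%d, payload at %d ('%c')\n",
	       net, net + IP6_LEN, data_off, buf[data_off]);
	return 0;
}
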
	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip6_frags.mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}
	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr)))) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  skb_network_header_len(skb));
		return -1;
	}

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&ip6_frags.mem) > ip6_frags_ctl.high_thresh)
		ip6_evictor(ip6_dst_idev(skb->dst));

	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
			  ip6_dst_idev(skb->dst))) != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq, NULL);
		return ret;
	}

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
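The htons(0xFFF9) test reads straight off the Fragment header layout: the 16-bit frag_off field packs the 13-bit offset in its upper bits (mask 0xFFF8), two reserved bits, and the M (more fragments) flag in bit 0, so 0xFFF8 | 0x0001 = 0xFFF9 covers "any offset, or more fragments to come". When all of those bits are zero, the packet carries a fragment header but is not actually fragmented, and reassembly is skipped. A small demonstration of the mask, with invented values:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int is_unfragmented(uint16_t frag_off_be)
{
	/* offset bits (0xFFF8) and the MF bit (0x0001) all clear */
	return !(frag_off_be & htons(0xFFF9));
}

int main(void)
{
	printf("%d\n", is_unfragmented(htons(0)));		/* 1           */
	printf("%d\n", is_unfragmented(htons(1)));		/* 0: MF set   */
	printf("%d\n", is_unfragmented(htons(188 << 3)));	/* 0: offset   */
	return 0;
}
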

static struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

void __init ipv6_frag_init(void)
{
	if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
		printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");

	ip6_frags.ctl = &ip6_frags_ctl;
	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.destructor = ip6_frag_free;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	inet_frags_init(&ip6_frags);
}