/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}
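
/*
 * Illustrative sketch (editorial, not part of the build): deriving a
 * bucket index for a hypothetical fragment key. The mask applied in
 * ipqhashfn() only folds the hash correctly because INETFRAGS_HASHSZ
 * is a power of two. All values below are made up.
 */
#if 0
static unsigned int example_bucket(void)
{
	__be16 id    = htons(0x1234);		/* hypothetical IP ID */
	__be32 saddr = htonl(0xc0a80001);	/* 192.168.0.1 */
	__be32 daddr = htonl(0xc0a80002);	/* 192.168.0.2 */

	return ipqhashfn(id, saddr, daddr, IPPROTO_UDP);
}
#endif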

static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp;
	struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return (qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user);
}
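
/*
 * Editorial note: because ip4_frag_match() also compares 'user', the same
 * on-wire fragment set is reassembled independently per user of the defrag
 * code (e.g. IP_DEFRAG_LOCAL_DELIVER vs IP_DEFRAG_CONNTRACK_IN), even when
 * id/saddr/daddr/protocol are identical.
 */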

/* Memory Tracking Functions. */
static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer(arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments. The evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/*
		 * Only search the routing table for the head fragment,
		 * when the defrag timeout fires at the PRE_ROUTING hook.
		 */
		if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
			const struct iphdr *iph = ip_hdr(head);
			int err = ip_route_input(head, iph->daddr, iph->saddr,
						 iph->tos, head->dev);
			if (unlikely(err))
				goto out_rcu_unlock;

			/*
			 * Only an end host needs to send an ICMP
			 * "Fragment Reassembly Timeout" message, per RFC 792.
			 */
			if (skb_rtable(head)->rt_type != RTN_LOCAL)
				goto out_rcu_unlock;

		}

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (q == NULL)
		goto out_nomem;

	return container_of(q, struct ipq, q);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left!\n");
	return NULL;
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}
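
/*
 * Illustrative sketch (editorial, not part of the build): the distance
 * test in ip_frag_too_far() is done in unsigned arithmetic, so it stays
 * correct when the peer's rid counter wraps around. The values below
 * are hypothetical.
 */
#if 0
static int example_too_far(void)
{
	unsigned int start = 0xfffffff0;	/* rid when this queue started */
	unsigned int end   = 0x00000010;	/* rid after the counter wrapped */

	return (end - start) > 64;		/* distance is 32: not too far */
}
#endif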

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(qp->q.net, fp);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
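	/*
	 * Worked example (editorial, hypothetical value): a host-order
	 * frag_off of 0x2004 has IP_MF set, a 13-bit fragment offset of
	 * 4 units, and therefore a byte offset of 4 << 3 = 32.
	 */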
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one. Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first holding the linear data and
	 * paged part, and the second holding only the fragment list. */
	if (skb_has_frags(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &qp->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
		       "queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
		       &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
		ip_evictor(net);

	/* Lookup (or create) queue header */
	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
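
/*
 * Illustrative sketch (editorial, not part of the build): the usual
 * calling pattern, roughly as in ip_local_deliver(). ip_defrag() takes
 * ownership of the skb; a non-zero return means the fragment was queued
 * (-EINPROGRESS) or dropped, so the caller must not touch the skb again.
 */
#if 0
static int example_deliver(struct sk_buff *skb)
{
	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;	/* queued or dropped; skb is gone */
	}
	/* skb now holds a complete datagram; hand it to the next stage. */
	return 0;
}
#endif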

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void ip4_frags_ctl_register(void)
{
	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	net->ipv4.frags.high_thresh = 256 * 1024;
	net->ipv4.frags.low_thresh = 192 * 1024;
	/*
	 * Important NOTE! The fragment queue must be destroyed before the MSL
	 * expires. RFC 791 is wrong in proposing to prolong the timer by the
	 * TTL on each fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);