Commit | Line | Data |
---|---|---|
7240cdec AA |
1 | /* 6LoWPAN fragment reassembly |
2 | * | |
3 | * | |
4 | * Authors: | |
5 | * Alexander Aring <aar@pengutronix.de> | |
6 | * | |
7 | * Based on: net/ipv6/reassembly.c | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or | |
10 | * modify it under the terms of the GNU General Public License | |
11 | * as published by the Free Software Foundation; either version | |
12 | * 2 of the License, or (at your option) any later version. | |
13 | */ | |
14 | ||
15 | #define pr_fmt(fmt) "6LoWPAN: " fmt | |
16 | ||
17 | #include <linux/net.h> | |
18 | #include <linux/list.h> | |
19 | #include <linux/netdevice.h> | |
20 | #include <linux/random.h> | |
21 | #include <linux/jhash.h> | |
22 | #include <linux/skbuff.h> | |
23 | #include <linux/slab.h> | |
24 | #include <linux/export.h> | |
25 | ||
26 | #include <net/ieee802154_netdev.h> | |
cefc8c8a | 27 | #include <net/6lowpan.h> |
7240cdec AA |
28 | #include <net/ipv6.h> |
29 | #include <net/inet_frag.h> | |
30 | ||
7240cdec AA |
31 | #include "reassembly.h" |
32 | ||
33 | static struct inet_frags lowpan_frags; | |
34 | ||
35 | static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, | |
36 | struct sk_buff *prev, struct net_device *dev); | |
37 | ||
4c7f778e | 38 | static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size, |
ae531b94 PB |
39 | const struct ieee802154_addr *saddr, |
40 | const struct ieee802154_addr *daddr) | |
7240cdec AA |
41 | { |
42 | u32 c; | |
43 | ||
44 | net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd)); | |
45 | c = jhash_3words(ieee802154_addr_hash(saddr), | |
46 | ieee802154_addr_hash(daddr), | |
47 | (__force u32)(tag + (d_size << 16)), | |
48 | lowpan_frags.rnd); | |
49 | ||
50 | return c & (INETFRAGS_HASHSZ - 1); | |
51 | } | |
52 | ||
53 | static unsigned int lowpan_hashfn(struct inet_frag_queue *q) | |
54 | { | |
55 | struct lowpan_frag_queue *fq; | |
56 | ||
57 | fq = container_of(q, struct lowpan_frag_queue, q); | |
58 | return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr); | |
59 | } | |
60 | ||
17794326 | 61 | static bool lowpan_frag_match(struct inet_frag_queue *q, void *a) |
7240cdec AA |
62 | { |
63 | struct lowpan_frag_queue *fq; | |
64 | struct lowpan_create_arg *arg = a; | |
65 | ||
66 | fq = container_of(q, struct lowpan_frag_queue, q); | |
67 | return fq->tag == arg->tag && fq->d_size == arg->d_size && | |
ae531b94 PB |
68 | ieee802154_addr_equal(&fq->saddr, arg->src) && |
69 | ieee802154_addr_equal(&fq->daddr, arg->dst); | |
7240cdec | 70 | } |
7240cdec | 71 | |
17794326 | 72 | static void lowpan_frag_init(struct inet_frag_queue *q, void *a) |
7240cdec AA |
73 | { |
74 | struct lowpan_frag_queue *fq; | |
75 | struct lowpan_create_arg *arg = a; | |
76 | ||
77 | fq = container_of(q, struct lowpan_frag_queue, q); | |
78 | ||
79 | fq->tag = arg->tag; | |
80 | fq->d_size = arg->d_size; | |
81 | fq->saddr = *arg->src; | |
82 | fq->daddr = *arg->dst; | |
83 | } | |
7240cdec AA |
84 | |
85 | static void lowpan_frag_expire(unsigned long data) | |
86 | { | |
87 | struct frag_queue *fq; | |
88 | struct net *net; | |
89 | ||
90 | fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); | |
91 | net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags); | |
92 | ||
17794326 FW |
93 | spin_lock(&fq->q.lock); |
94 | ||
95 | if (fq->q.last_in & INET_FRAG_COMPLETE) | |
96 | goto out; | |
97 | ||
98 | inet_frag_kill(&fq->q, &lowpan_frags); | |
99 | out: | |
100 | spin_unlock(&fq->q.lock); | |
101 | inet_frag_put(&fq->q, &lowpan_frags); | |
7240cdec AA |
102 | } |
103 | ||
/* Look up (or create) the reassembly queue for one datagram, keyed by
 * fragment tag, datagram size and the link-layer address pair.
 * Returns NULL on failure (allocation error or frag-queue overflow),
 * otherwise a queue with a reference held that the caller must drop
 * with inet_frag_put().
 */
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct ieee802154_frag_info *frag_info,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	/* NOTE(review): there is deliberately no read_unlock() here —
	 * this inet_frag API generation expects inet_frag_find() to be
	 * entered with ->lock read-held and to release it internally;
	 * confirm against inet_frag_find()'s contract.
	 */
	read_lock(&lowpan_frags.lock);
	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&net->ieee802154_lowpan.frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}
129 | ||
/* Insert one fragment skb into the per-datagram queue @fq.
 *
 * Called with fq->q.lock held.  Returns the result of
 * lowpan_frag_reasm() (1 on a fully reassembled datagram) once all
 * fragments have arrived, -1 otherwise (fragment queued, or dropped
 * on error — the error path consumes @skb).
 */
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	/* d_offset is carried on the wire in units of 8 octets. */
	offset = mac_cb(skb)->frag_info.d_offset << 3;
	end = mac_cb(skb)->frag_info.d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || mac_cb(prev)->frag_info.d_offset <
		     mac_cb(skb)->frag_info.d_offset) {
		/* Fast path: appending at the tail (in-order arrival). */
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (mac_cb(next)->frag_info.d_offset >=
		    mac_cb(skb)->frag_info.d_offset)
			break; /* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	/* First and last fragments seen and no holes left: reassemble. */
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	inet_frag_lru_move(&fq->q);
	return -1;
err:
	kfree_skb(skb);
	return -1;
}
221 | ||
/* Merge the fragment chain of @fq into a single skb.
 *
 * It is called with locked fq, and caller must check that the queue
 * is eligible for reassembly i.e. it is not COMPLETE, the last and
 * the first frames arrived and all the bits are here.
 *
 * Returns 1 on success, -1 on out-of-memory.
 * (The old comment claiming a NULL/nexthdr-pointer return was stale —
 * this function returns an int.)
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head: clone the current head
	 * into the received skb's slot in the chain, then morph the
	 * received skb (prev->next) into the old head's identity, so the
	 * caller's skb pointer ends up being the reassembled packet.
	 */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		/* Move the frag_list bytes (everything past the paged
		 * frags) into the zero-byte clone.
		 */
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	/* Walk the remaining fragments, coalescing each into head when
	 * possible, otherwise chaining it onto head's frag_list.
	 */
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	/* Release the memory accounted against the queue in one go. */
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}
320 | ||
321 | static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type, | |
322 | struct ieee802154_frag_info *frag_info) | |
323 | { | |
324 | bool fail; | |
325 | u8 pattern = 0, low = 0; | |
326 | ||
327 | fail = lowpan_fetch_skb(skb, &pattern, 1); | |
328 | fail |= lowpan_fetch_skb(skb, &low, 1); | |
329 | frag_info->d_size = (pattern & 7) << 8 | low; | |
330 | fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2); | |
331 | ||
332 | if (frag_type == LOWPAN_DISPATCH_FRAGN) { | |
333 | fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1); | |
334 | } else { | |
335 | skb_reset_network_header(skb); | |
336 | frag_info->d_offset = 0; | |
337 | } | |
338 | ||
339 | if (unlikely(fail)) | |
340 | return -EIO; | |
341 | ||
342 | return 0; | |
343 | } | |
344 | ||
/* Entry point for a received 6LoWPAN fragment (FRAG1 or FRAGN).
 *
 * Parses the fragmentation header, finds or creates the matching
 * reassembly queue and enqueues the fragment.  Returns 1 when the
 * datagram was completely reassembled, -1 otherwise (fragment queued,
 * or dropped — the error path consumes @skb).
 */
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct ieee802154_frag_info *frag_info = &mac_cb(skb)->frag_info;
	struct ieee802154_addr source, dest;
	int err;

	/* Copy the link-layer addresses out of the cb before the header
	 * parse below starts pulling bytes from the skb.
	 */
	source = mac_cb(skb)->source;
	dest = mac_cb(skb)->dest;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	/* Drop datagrams larger than the sysctl-configured maximum. */
	if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
		goto err;

	/* Make room under the frag memory limit before queueing. */
	inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);

	fq = fq_find(net, frag_info, &source, &dest);
	if (fq != NULL) {
		int ret;
		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		/* Drop the lookup reference taken by fq_find(). */
		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);
381 | ||
#ifdef CONFIG_SYSCTL
/* Per-namespace sysctls under net/ieee802154/6lowpan/.  The .data
 * pointers reference init_net here and are re-pointed at the actual
 * namespace in lowpan_frags_ns_sysctl_register(); the entry order must
 * stay in sync with the table[0..3] indices used there.
 */
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname = "6lowpanfrag_high_thresh",
		.data = &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "6lowpanfrag_low_thresh",
		.data = &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "6lowpanfrag_time",
		.data = &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "6lowpanfrag_max_datagram_size",
		.data = &init_net.ieee802154_lowpan.max_dsize,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{ }
};
414 | ||
/* Global (not per-namespace) sysctls; registered once in init_net by
 * lowpan_frags_sysctl_register().
 */
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname = "6lowpanfrag_secret_interval",
		.data = &lowpan_frags.secret_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ }
};
425 | ||
/* Register the per-namespace 6LoWPAN fragment sysctls.  For a
 * non-initial namespace the template table is kmemdup()ed and its
 * .data pointers re-targeted at @net's own counters.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		/* Indices must match lowpan_frags_ns_ctl_table order. */
		table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
		table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
		table[2].data = &net->ieee802154_lowpan.frags.timeout;
		table[3].data = &net->ieee802154_lowpan.max_dsize;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	/* Only free the duplicated copy, never the static template. */
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
461 | ||
462 | static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net) | |
463 | { | |
464 | struct ctl_table *table; | |
465 | ||
466 | table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg; | |
467 | unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr); | |
468 | if (!net_eq(net, &init_net)) | |
469 | kfree(table); | |
470 | } | |
471 | ||
/* Header for the global 6LoWPAN frag sysctl table registered below. */
static struct ctl_table_header *lowpan_ctl_header;

/* Register the global sysctl table in the initial namespace only.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}
481 | ||
/* Unregister the global 6LoWPAN frag sysctl table. */
static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
/* !CONFIG_SYSCTL: stub out all sysctl registration as successful
 * no-ops so callers need no #ifdefs of their own.
 */
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif
505 | ||
/* Per-namespace init: seed the frag thresholds/timeout from the IPv6
 * reassembly defaults, allow the maximum datagram size by default,
 * then register the namespace's sysctls.
 */
static int __net_init lowpan_frags_init_net(struct net *net)
{
	net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
	net->ieee802154_lowpan.max_dsize = 0xFFFF;

	inet_frags_init_net(&net->ieee802154_lowpan.frags);

	/* NOTE(review): if sysctl registration fails, the state set up by
	 * inet_frags_init_net() is not unwound here — confirm whether the
	 * pernet error path tolerates that.
	 */
	return lowpan_frags_ns_sysctl_register(net);
}
517 | ||
/* Per-namespace teardown: reverse of lowpan_frags_init_net(). */
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
}
523 | ||
/* Hooks run for every network namespace as it is created/destroyed. */
static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};
528 | ||
/* Module init for 6LoWPAN reassembly: global sysctls, pernet hooks,
 * then the inet_frags descriptor itself.
 * Returns 0 on success or a negative errno, unwinding on failure.
 */
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	/* NOTE(review): the descriptor is populated only after the pernet
	 * subsys is registered; no fragments can be received this early in
	 * module init, but confirm if the rx hookup ever moves earlier.
	 */
	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&lowpan_frags);

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}
556 | ||
/* Module teardown: undo lowpan_net_frag_init() in reverse order. */
void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}