6lowpan: reassembly: un-export local functions
[deliverable/linux.git] / net / ieee802154 / reassembly.c
CommitLineData
7240cdec
AA
1/* 6LoWPAN fragment reassembly
2 *
3 *
4 * Authors:
5 * Alexander Aring <aar@pengutronix.de>
6 *
7 * Based on: net/ipv6/reassembly.c
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#define pr_fmt(fmt) "6LoWPAN: " fmt
16
17#include <linux/net.h>
18#include <linux/list.h>
19#include <linux/netdevice.h>
20#include <linux/random.h>
21#include <linux/jhash.h>
22#include <linux/skbuff.h>
23#include <linux/slab.h>
24#include <linux/export.h>
25
26#include <net/ieee802154_netdev.h>
cefc8c8a 27#include <net/6lowpan.h>
7240cdec
AA
28#include <net/ipv6.h>
29#include <net/inet_frag.h>
30
7240cdec
AA
31#include "reassembly.h"
32
/* Descriptor for the shared inet fragment machinery; the callbacks and
 * parameters are filled in by lowpan_net_frag_init().
 */
static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *dev);
37
4c7f778e 38static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
7240cdec
AA
39 const struct ieee802154_addr *saddr,
40 const struct ieee802154_addr *daddr)
41{
42 u32 c;
43
44 net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
45 c = jhash_3words(ieee802154_addr_hash(saddr),
46 ieee802154_addr_hash(daddr),
47 (__force u32)(tag + (d_size << 16)),
48 lowpan_frags.rnd);
49
50 return c & (INETFRAGS_HASHSZ - 1);
51}
52
53static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
54{
55 struct lowpan_frag_queue *fq;
56
57 fq = container_of(q, struct lowpan_frag_queue, q);
58 return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
59}
60
17794326 61static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
7240cdec
AA
62{
63 struct lowpan_frag_queue *fq;
64 struct lowpan_create_arg *arg = a;
65
66 fq = container_of(q, struct lowpan_frag_queue, q);
67 return fq->tag == arg->tag && fq->d_size == arg->d_size &&
68 ieee802154_addr_addr_equal(&fq->saddr, arg->src) &&
69 ieee802154_addr_addr_equal(&fq->daddr, arg->dst);
70}
7240cdec 71
17794326 72static void lowpan_frag_init(struct inet_frag_queue *q, void *a)
7240cdec
AA
73{
74 struct lowpan_frag_queue *fq;
75 struct lowpan_create_arg *arg = a;
76
77 fq = container_of(q, struct lowpan_frag_queue, q);
78
79 fq->tag = arg->tag;
80 fq->d_size = arg->d_size;
81 fq->saddr = *arg->src;
82 fq->daddr = *arg->dst;
83}
7240cdec
AA
84
85static void lowpan_frag_expire(unsigned long data)
86{
87 struct frag_queue *fq;
88 struct net *net;
89
90 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
91 net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
92
17794326
FW
93 spin_lock(&fq->q.lock);
94
95 if (fq->q.last_in & INET_FRAG_COMPLETE)
96 goto out;
97
98 inet_frag_kill(&fq->q, &lowpan_frags);
99out:
100 spin_unlock(&fq->q.lock);
101 inet_frag_put(&fq->q, &lowpan_frags);
7240cdec
AA
102}
103
/* Look up (or create) the reassembly queue for one 6LoWPAN datagram,
 * keyed by fragment tag, total datagram size and link-layer src/dst
 * addresses.  Returns a referenced queue, or NULL on failure (allocation
 * error or hash-bucket overflow).
 */
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct ieee802154_frag_info *frag_info,
	const struct ieee802154_addr *src, const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	/* NOTE(review): no unlock here on purpose — inet_frag_find() is
	 * expected to drop lowpan_frags.lock itself (same pattern as
	 * net/ipv6/reassembly.c); confirm against the inet_frag API.
	 */
	read_lock(&lowpan_frags.lock);
	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&net->ieee802154_lowpan.frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}
128
/* Insert one fragment @skb into reassembly queue @fq.
 *
 * Called with fq->q.lock held.  Consumes @skb: it is either linked into
 * the queue or freed on error.  Returns the result of lowpan_frag_reasm()
 * when this fragment completes the datagram, -1 otherwise (fragment
 * queued, or dropped as corrupt/duplicate-conflicting).
 */
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	/* d_offset is in units of 8 octets; d_size is the size of the
	 * whole (uncompressed) datagram as carried in the frag header.
	 */
	offset = mac_cb(skb)->frag_info.d_offset << 3;
	end = mac_cb(skb)->frag_info.d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || mac_cb(prev)->frag_info.d_offset <
	    mac_cb(skb)->frag_info.d_offset) {
		/* Fast path: fragment goes at the tail. */
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (mac_cb(next)->frag_info.d_offset >=
		    mac_cb(skb)->frag_info.d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	/* Remember the device for reassembly; the queued skb must not
	 * keep pointing at it while parked here.
	 */
	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	/* First and last fragments seen and every byte accounted for:
	 * the datagram is complete, reassemble it now.
	 */
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		/* Park the dst reference across reassembly. */
		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	inet_frag_lru_move(&fq->q);
	return -1;
err:
	kfree_skb(skb);
	return -1;
}
220
/* Reassemble a complete datagram from the fragments queued in @fq.
 *
 * It is called with locked fq, and caller must check that
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first frames arrived and all the bits are here.
 *
 * NOTE(review): the previous comment (copied from net/ipv6/reassembly.c)
 * claimed this returns NULL / a nexthdr pointer; it actually returns
 * 1 on success and -1 on out-of-memory.  On success the just-received
 * skb is morphed into the head of the reassembled datagram, so the
 * caller's skb carries the full payload afterwards.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	/* Unhash and mark complete: no more fragments may join. */
	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		/* The clone takes the received skb's slot in the list... */
		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		/* ...and the received skb takes over the old head's data. */
		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	/* Fold every remaining fragment into head, coalescing where the
	 * skb geometry allows, otherwise chaining via the frag_list.
	 */
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	/* Return the memory the queue was charged for in one go. */
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}
319
320static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
321 struct ieee802154_frag_info *frag_info)
322{
323 bool fail;
324 u8 pattern = 0, low = 0;
325
326 fail = lowpan_fetch_skb(skb, &pattern, 1);
327 fail |= lowpan_fetch_skb(skb, &low, 1);
328 frag_info->d_size = (pattern & 7) << 8 | low;
329 fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);
330
331 if (frag_type == LOWPAN_DISPATCH_FRAGN) {
332 fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
333 } else {
334 skb_reset_network_header(skb);
335 frag_info->d_offset = 0;
336 }
337
338 if (unlikely(fail))
339 return -EIO;
340
341 return 0;
342}
343
/* Entry point for received 6LoWPAN fragment frames; @frag_type is the
 * dispatch value (FRAG1 or FRAGN).  Consumes @skb.  Returns 1 once a
 * datagram has been fully reassembled (into the caller's skb), -1
 * otherwise (fragment queued or dropped).
 */
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct ieee802154_frag_info *frag_info = &mac_cb(skb)->frag_info;
	int err;

	/* Strip the fragmentation header; fills tag/size/offset. */
	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	/* Enforce the per-netns datagram size limit (sysctl). */
	if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
		goto err;

	/* Best-effort reclaim of old queues before adding more state. */
	inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);

	fq = fq_find(net, frag_info, &mac_cb(skb)->sa, &mac_cb(skb)->da);
	if (fq != NULL) {
		int ret;
		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		/* Drop the reference taken by fq_find(). */
		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);
376
#ifdef CONFIG_SYSCTL
/* Per-netns sysctl template.  Entry order matters: the table[i].data
 * fixups in lowpan_frags_ns_sysctl_register() index into this array.
 */
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname = "6lowpanfrag_high_thresh",
		.data = &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "6lowpanfrag_low_thresh",
		.data = &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{
		.procname = "6lowpanfrag_time",
		.data = &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "6lowpanfrag_max_datagram_size",
		.data = &init_net.ieee802154_lowpan.max_dsize,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec
	},
	{ }
};
409
/* Global (registered once, in init_net) sysctl: interval between
 * rekeying of the fragment hash secret.
 */
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname = "6lowpanfrag_secret_interval",
		.data = &lowpan_frags.secret_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ }
};
420
/* Register the per-netns 6lowpanfrag_* sysctls.  Non-init namespaces get
 * a duplicated table whose .data pointers are rewired to their own state;
 * the indices below must match the order of lowpan_frags_ns_ctl_table.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
		table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
		table[2].data = &net->ieee802154_lowpan.frags.timeout;
		table[3].data = &net->ieee802154_lowpan.max_dsize;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	/* Keep the header so the netns teardown can unregister it. */
	net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
456
457static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
458{
459 struct ctl_table *table;
460
461 table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
462 unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
463 if (!net_eq(net, &init_net))
464 kfree(table);
465}
466
467static struct ctl_table_header *lowpan_ctl_header;
468
469static int lowpan_frags_sysctl_register(void)
470{
471 lowpan_ctl_header = register_net_sysctl(&init_net,
472 "net/ieee802154/6lowpan",
473 lowpan_frags_ctl_table);
474 return lowpan_ctl_header == NULL ? -ENOMEM : 0;
475}
476
/* Remove the global 6lowpanfrag sysctls registered at init time. */
static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
/* CONFIG_SYSCTL disabled: all sysctl registration becomes a no-op so the
 * init/exit paths need no #ifdefs.
 */
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif
500
/* Per-netns init: seed the fragment thresholds and timeout (defaults
 * shared with IPv6 reassembly) and register the per-netns sysctls.
 */
static int __net_init lowpan_frags_init_net(struct net *net)
{
	net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
	/* Effectively unlimited: d_size is a 16-bit quantity. */
	net->ieee802154_lowpan.max_dsize = 0xFFFF;

	inet_frags_init_net(&net->ieee802154_lowpan.frags);

	return lowpan_frags_ns_sysctl_register(net);
}
512
/* Per-netns teardown: remove the sysctls first, then flush any
 * outstanding reassembly queues for this namespace.
 */
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
}
518
/* Per-network-namespace lifecycle hooks for the reassembly state. */
static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};
523
/* Module init: register the sysctls and pernet hooks, then wire up the
 * lowpan_frags callbacks and start the shared inet_frags machinery.
 * Returns 0 on success or a negative errno, unwinding prior steps.
 */
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	/* Rehash the fragment hash secret every ten minutes. */
	lowpan_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&lowpan_frags);

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}
551
/* Module exit: undo lowpan_net_frag_init() in reverse order. */
void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}
This page took 0.049316 seconds and 5 git commands to generate.