net/ipv4/fou.c
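/*
 * fou (foo over UDP) and GUE (Generic UDP Encapsulation) receive and
 * transmit support.  fou encapsulates an IP protocol directly in UDP;
 * GUE inserts a small header after the UDP header that carries the
 * inner protocol number and optional fields such as remote checksum
 * offload data.  Listener ports are configured from user space over
 * the "fou" generic netlink family (see fou_nl_family below).
 */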
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

static DEFINE_SPINLOCK(fou_lock);
static LIST_HEAD(fou_list);

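/* Per-port state for one open fou/GUE listener socket (a "fou port") */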
struct fou {
	struct socket *sock;
	u8 protocol;
	u16 port;
	struct udp_offload udp_offloads;
	struct list_head list;
};

struct fou_cfg {
	u16 type;
	u8 protocol;
	struct udp_port_cfg udp_config;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
}

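/* encap_rcv handler for direct fou.  Returning a negative value tells
 * the UDP stack to resubmit the packet to the IP protocol handler for
 * -(return value); returning 1 hands the packet back to normal UDP
 * processing.
 */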
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	fou_recv_pull(skb, sizeof(struct udphdr));

	return -fou->protocol;
}

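/* Handle the GUE remote checksum offload option: the sender filled the
 * inner checksum field with only the pseudo-header checksum and recorded
 * where checksum coverage starts and where the checksum field sits; here
 * the receiver computes the missing part from the packet checksum and
 * writes the completed value back into the packet.
 */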
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, int hdrlen, u8 ipproto)
{
	__be16 *pd = data;
	u16 start = ntohs(pd[0]);
	u16 offset = ntohs(pd[1]);
	u16 poffset = 0;
	u16 plen;
	__wsum csum, delta;
	__sum16 *psum;

	if (skb->remcsum_offload) {
		/* Already processed in GRO path */
		skb->remcsum_offload = 0;
		return guehdr;
	}

	if (start > skb->len - hdrlen ||
	    offset > skb->len - hdrlen - sizeof(u16))
		return NULL;

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE))
		__skb_checksum_complete(skb);

	plen = hdrlen + offset + sizeof(u16);
	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (ipproto == IPPROTO_IP && sizeof(struct iphdr) < plen) {
		struct iphdr *ip = (struct iphdr *)(skb->data + hdrlen);

		/* If next header happens to be IP we can skip that for the
		 * checksum calculation since the IP header checksum is zero
		 * if correct.
		 */
		poffset = ip->ihl * 4;
	}

	csum = csum_sub(skb->csum, skb_checksum(skb, poffset + hdrlen,
						start - poffset - hdrlen, 0));

	/* Set derived checksum in packet */
	psum = (__sum16 *)(skb->data + hdrlen + offset);
	delta = csum_sub(csum_fold(csum), *psum);
	*psum = csum_fold(csum);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

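/* encap_rcv handler for GUE.  Validates the GUE header, strips the UDP
 * and GUE headers, processes any private flags (currently only remote
 * checksum offload) and resubmits the packet to the inner protocol.
 */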
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	hdrlen = sizeof(struct guehdr) + optlen;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull UDP header now, skb->data points to guehdr */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Pull csum through the guehdr now.  This is needed if there is
	 * a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype);
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, hdrlen);
	skb_reset_transport_header(skb);

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

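/* GRO receive for direct fou: look up the inner protocol's offload ops
 * (the protocol number was saved in NAPI_GRO_CB by the UDP GRO code)
 * and delegate to its gro_receive callback.
 */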
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

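/* GRO-path counterpart of gue_remcsum(): completes the remote checksum
 * offload using NAPI_GRO_CB(skb)->csum and marks skb->remcsum_offload so
 * the non-GRO receive path knows the work has already been done.
 */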
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, u8 ipproto)
{
	__be16 *pd = data;
	u16 start = ntohs(pd[0]);
	u16 offset = ntohs(pd[1]);
	u16 poffset = 0;
	u16 plen;
	void *ptr;
	__wsum csum, delta;
	__sum16 *psum;

	if (skb->remcsum_offload)
		return guehdr;

	if (start > skb_gro_len(skb) - hdrlen ||
	    offset > skb_gro_len(skb) - hdrlen - sizeof(u16) ||
	    !NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	plen = hdrlen + offset + sizeof(u16);

	/* Pull checksum that will be written */
	if (skb_gro_header_hard(skb, off + plen)) {
		guehdr = skb_gro_header_slow(skb, off + plen, off);
		if (!guehdr)
			return NULL;
	}

	/* ptr is the start of the encapsulated packet, right after the
	 * GUE header and options.
	 */
	ptr = (void *)guehdr + hdrlen;

	if (ipproto == IPPROTO_IP &&
	    (hdrlen + sizeof(struct iphdr) < plen)) {
		struct iphdr *ip = (struct iphdr *)ptr;

		/* If next header happens to be IP we can skip
		 * that for the checksum calculation since the
		 * IP header checksum is zero if correct.
		 */
		poffset = ip->ihl * 4;
	}

	csum = csum_sub(NAPI_GRO_CB(skb)->csum,
			csum_partial(ptr + poffset, start - poffset, 0));

	/* Set derived checksum in packet */
	psum = (__sum16 *)(ptr + offset);
	delta = csum_sub(csum_fold(csum), *psum);
	*psum = csum_fold(csum);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	skb->remcsum_offload = 1;

	return guehdr;
}

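/* GRO receive for GUE: validate and pull the GUE header, handle remote
 * checksum offload, compare the GUE header of held packets so that only
 * matching flows are aggregated, then delegate to the inner protocol's
 * gro_receive callback.
 */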
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen,
						 guehdr->proto_ctype);
			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int gue_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

static int fou_add_to_port_list(struct fou *fou)
{
	struct fou *fout;

	spin_lock(&fou_lock);
	list_for_each_entry(fout, &fou_list, list) {
		if (fou->port == fout->port) {
			spin_unlock(&fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fou_list);
	spin_unlock(&fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	udp_del_offload(&fou->udp_offloads);

	list_del(&fou->list);

	/* Remove hooks into tunnel socket */
	sk->sk_user_data = NULL;

	sock_release(sock);

	kfree(fou);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}

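/* Create a fou port: open a kernel UDP socket on the configured port,
 * hook up the encap_rcv handler for the requested encapsulation type,
 * register the GRO offloads and add the port to the global list.
 */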
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct fou *fou = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	udp_set_convert_csum(sk, true);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(&fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou *fou;
	u16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;

	spin_lock(&fou_lock);
	list_for_each_entry(fou, &fou_list, list) {
		if (fou->port == port) {
			udp_del_offload(&fou->udp_offloads);
			fou_release(fou);
			err = 0;
			break;
		}
	}
	spin_unlock(&fou_lock);

	return err;
}

static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT]		= { .type = NLA_U16, },
	[FOU_ATTR_AF]		= { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO]	= { .type = NLA_U8, },
	[FOU_ATTR_TYPE]		= { .type = NLA_U8, },
};

static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET && family != AF_INET6)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(&init_net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct fou_cfg cfg;
	int err;

	/* Check the parse result rather than ignoring it */
	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(&init_net, &cfg);
}

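/* Operations of the "fou" generic netlink family.  From user space these
 * are typically driven through iproute2, e.g.:
 *
 *	ip fou add port 5555 gue
 *	ip fou del port 5555
 */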
static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

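/* Transmit path: push the outer UDP header for an encapsulated packet
 * and tell the tunnel code to use IPPROTO_UDP in the outer IP header.
 */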
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	uh->check = 0;
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);

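/* Like fou_build_header(), but additionally builds the GUE header in
 * front of the payload.  If remote checksum offload is requested and the
 * packet needs checksum help, the outer UDP checksum is dropped in favor
 * of the remcsum option.
 */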
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		csum = false;
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

static int __init fou_init(void)
{
	int ret;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);

	return ret;
}

static void __exit fou_fini(void)
{
	struct fou *fou, *next;

	genl_unregister_family(&fou_nl_family);

	/* Close all the FOU sockets */

	spin_lock(&fou_lock);
	list_for_each_entry_safe(fou, next, &fou_list, list)
		fou_release(fou);
	spin_unlock(&fou_lock);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");