#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>

struct fou {
        struct socket *sock;
        u8 protocol;
        u8 flags;
        __be16 port;
        u16 type;
        struct udp_offload udp_offloads;
        struct list_head list;
        struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
        u16 type;
        u8 protocol;
        u8 flags;
        struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
        struct list_head fou_list;
        struct mutex fou_lock;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
        return sk->sk_user_data;
}

static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
        struct iphdr *iph = ip_hdr(skb);

        /* Remove 'len' bytes from the packet (UDP header and
         * FOU header if present).
         */
        iph->tot_len = htons(ntohs(iph->tot_len) - len);
        __skb_pull(skb, len);
        skb_postpull_rcsum(skb, udp_hdr(skb), len);
        skb_reset_transport_header(skb);
}

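/* UDP encap_rcv callback for plain FOU. Strips the UDP header and
 * returns the negative of the configured inner protocol so that the
 * stack resubmits the packet as that protocol; returning 1 hands the
 * packet back for normal UDP processing.
 */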
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);

        if (!fou)
                return 1;

        fou_recv_pull(skb, sizeof(struct udphdr));

        return -fou->protocol;
}

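/* Process the GUE remote checksum offload option on the receive path.
 * The two 16-bit option words hold the checksum start and offset
 * relative to the end of the GUE header. Returns the (possibly
 * relocated) GUE header, or NULL if the required bytes cannot be
 * pulled into the linear area.
 */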
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
                                  void *data, size_t hdrlen, u8 ipproto,
                                  bool nopartial)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = sizeof(struct udphdr) + hdrlen +
            max_t(size_t, offset + sizeof(u16), start);

        if (skb->remcsum_offload)
                return guehdr;

        if (!pskb_may_pull(skb, plen))
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        skb_remcsum_process(skb, (void *)guehdr + hdrlen,
                            start, offset, nopartial);

        return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
        /* No support yet */
        kfree_skb(skb);
        return 0;
}

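/* UDP encap_rcv callback for GUE. Validates the GUE header, handles
 * any private flags (currently only remote checksum offload), strips
 * the UDP and GUE headers, and returns the negative of proto_ctype so
 * the packet is resubmitted as the inner protocol.
 */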
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
        struct fou *fou = fou_from_sock(sk);
        size_t len, optlen, hdrlen;
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;

        if (!fou)
                return 1;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);
        if (!pskb_may_pull(skb, len))
                goto drop;

        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (!pskb_may_pull(skb, len))
                goto drop;

        /* guehdr may change after pull */
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];

        hdrlen = sizeof(struct guehdr) + optlen;

        if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
                goto drop;

        ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

        /* Pull csum through the guehdr now. This can be used if
         * there is a remote checksum offload.
         */
        skb_postpull_rcsum(skb, udp_hdr(skb), len);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_remcsum(skb, guehdr, data + doffset,
                                             hdrlen, guehdr->proto_ctype,
                                             !!(fou->flags &
                                                FOU_F_REMCSUM_NOPARTIAL));
                        if (!guehdr)
                                goto drop;

                        data = &guehdr[1];

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);

        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);

        return -guehdr->proto_ctype;

drop:
        kfree_skb(skb);
        return 0;
}

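/* GRO receive handler for plain FOU: look up the inner protocol's
 * offload callbacks and hand the packet straight through to them.
 */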
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb,
                                        struct udp_offload *uoff)
{
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        const struct net_offload **offloads;

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();

        return pp;
}

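/* GRO complete handler for plain FOU: finish the tunnel GRO and then
 * invoke the inner protocol's gro_complete callback.
 */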
static int fou_gro_complete(struct sk_buff *skb, int nhoff,
                            struct udp_offload *uoff)
{
        const struct net_offload *ops;
        u8 proto = NAPI_GRO_CB(skb)->proto;
        int err = -ENOSYS;
        const struct net_offload **offloads;

        udp_tunnel_gro_complete(skb, nhoff);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
        rcu_read_unlock();

        return err;
}

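/* GRO variant of the remote checksum offload processing. Only done
 * once the GRO checksum has been validated; afterwards the skb is
 * marked so that gue_remcsum() does not process the option again.
 */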
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
                                      size_t hdrlen, struct gro_remcsum *grc,
                                      bool nopartial)
{
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);

        if (skb->remcsum_offload)
                return guehdr;

        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;

        guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
                                         start, offset, grc, nopartial);

        skb->remcsum_offload = 1;

        return guehdr;
}

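/* GRO receive handler for GUE. Validates the header, processes the
 * remote checksum offload option if present, matches flows on the
 * base GUE header word plus any options, and then dispatches to the
 * inner protocol's gro_receive callback.
 */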
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb,
                                        struct udp_offload *uoff)
{
        const struct net_offload **offloads;
        const struct net_offload *ops;
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct guehdr *guehdr;
        size_t len, optlen, hdrlen, off;
        void *data;
        u16 doffset = 0;
        int flush = 1;
        struct fou *fou = container_of(uoff, struct fou, udp_offloads);
        struct gro_remcsum grc;

        skb_gro_remcsum_init(&grc);

        off = skb_gro_offset(skb);
        len = off + sizeof(*guehdr);

        guehdr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        optlen = guehdr->hlen << 2;
        len += optlen;

        if (skb_gro_header_hard(skb, len)) {
                guehdr = skb_gro_header_slow(skb, len, off);
                if (unlikely(!guehdr))
                        goto out;
        }

        if (unlikely(guehdr->control) || guehdr->version != 0 ||
            validate_gue_flags(guehdr, optlen))
                goto out;

        hdrlen = sizeof(*guehdr) + optlen;

        /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
         * this is needed if there is a remote checksum offload.
         */
        skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

        data = &guehdr[1];

        if (guehdr->flags & GUE_FLAG_PRIV) {
                __be32 flags = *(__be32 *)(data + doffset);

                doffset += GUE_LEN_PRIV;

                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
                                                 data + doffset, hdrlen, &grc,
                                                 !!(fou->flags &
                                                    FOU_F_REMCSUM_NOPARTIAL));

                        if (!guehdr)
                                goto out;

                        data = &guehdr[1];

                        doffset += GUE_PLEN_REMCSUM;
                }
        }

        skb_gro_pull(skb, hdrlen);

        for (p = *head; p; p = p->next) {
                const struct guehdr *guehdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                guehdr2 = (struct guehdr *)(p->data + off);

                /* Compare base GUE header to be equal (covers
                 * hlen, version, proto_ctype, and flags).
                 */
                if (guehdr->word != guehdr2->word) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                /* Check that the optional fields are the same. */
                if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
                                           guehdr->hlen << 2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[guehdr->proto_ctype]);
        if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;

        pp = ops->callbacks.gro_receive(head, skb);
        flush = 0;

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;
        skb_gro_remcsum_cleanup(skb, &grc);

        return pp;
}

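/* GRO complete handler for GUE: skip over the GUE header and call the
 * inner protocol's gro_complete callback.
 */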
static int gue_gro_complete(struct sk_buff *skb, int nhoff,
                            struct udp_offload *uoff)
{
        const struct net_offload **offloads;
        struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
        const struct net_offload *ops;
        unsigned int guehlen;
        u8 proto;
        int err = -ENOENT;

        proto = guehdr->proto_ctype;

        guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
        if (WARN_ON(!ops || !ops->callbacks.gro_complete))
                goto out_unlock;

        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
        rcu_read_unlock();
        return err;
}

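/* Add a fou instance to the per-netns list, refusing duplicate ports. */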
static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fout;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (fou->port == fout->port) {
                        mutex_unlock(&fn->fou_lock);
                        return -EALREADY;
                }
        }

        list_add(&fou->list, &fn->fou_list);
        mutex_unlock(&fn->fou_lock);

        return 0;
}

static void fou_release(struct fou *fou)
{
        struct socket *sock = fou->sock;
        struct sock *sk = sock->sk;

        if (sk->sk_family == AF_INET)
                udp_del_offload(&fou->udp_offloads);
        list_del(&fou->list);
        udp_tunnel_sock_release(sock);

        kfree_rcu(fou, rcu);
}

static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = fou_udp_recv;
        fou->protocol = cfg->protocol;
        fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;
        fou->udp_offloads.ipproto = cfg->protocol;

        return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
        udp_sk(sk)->encap_rcv = gue_udp_recv;
        fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
        fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
        fou->udp_offloads.port = cfg->udp_config.local_udp_port;

        return 0;
}

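/* Create a fou instance for the given configuration: open the UDP
 * socket, set up the encap receive callback and GRO offloads for the
 * chosen encapsulation type, and add the instance to the per-netns
 * port list.
 */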
static int fou_create(struct net *net, struct fou_cfg *cfg,
                      struct socket **sockp)
{
        struct socket *sock = NULL;
        struct fou *fou = NULL;
        struct sock *sk;
        int err;

        /* Open UDP socket */
        err = udp_sock_create(net, &cfg->udp_config, &sock);
        if (err < 0)
                goto error;

        /* Allocate FOU port structure */
        fou = kzalloc(sizeof(*fou), GFP_KERNEL);
        if (!fou) {
                err = -ENOMEM;
                goto error;
        }

        sk = sock->sk;

        fou->flags = cfg->flags;
        fou->port = cfg->udp_config.local_udp_port;

        /* Initialize based on the fou encapsulation type */
        switch (cfg->type) {
        case FOU_ENCAP_DIRECT:
                err = fou_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        case FOU_ENCAP_GUE:
                err = gue_encap_init(sk, fou, cfg);
                if (err)
                        goto error;
                break;
        default:
                err = -EINVAL;
                goto error;
        }

        fou->type = cfg->type;

        udp_sk(sk)->encap_type = 1;
        udp_encap_enable();

        sk->sk_user_data = fou;
        fou->sock = sock;

        inet_inc_convert_csum(sk);

        sk->sk_allocation = GFP_ATOMIC;

        if (cfg->udp_config.family == AF_INET) {
                err = udp_add_offload(net, &fou->udp_offloads);
                if (err)
                        goto error;
        }

        err = fou_add_to_port_list(net, fou);
        if (err)
                goto error;

        if (sockp)
                *sockp = sock;

        return 0;

error:
        kfree(fou);
        if (sock)
                udp_tunnel_sock_release(sock);

        return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
        struct fou_net *fn = net_generic(net, fou_net_id);
        __be16 port = cfg->udp_config.local_udp_port;
        int err = -EINVAL;
        struct fou *fou;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fou, &fn->fou_list, list) {
                if (fou->port == port) {
                        fou_release(fou);
                        err = 0;
                        break;
                }
        }
        mutex_unlock(&fn->fou_lock);

        return err;
}

static struct genl_family fou_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = FOU_GENL_NAME,
        .version        = FOU_GENL_VERSION,
        .maxattr        = FOU_ATTR_MAX,
        .netnsok        = true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_PORT] = { .type = NLA_U16, },
        [FOU_ATTR_AF] = { .type = NLA_U8, },
        [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
        [FOU_ATTR_TYPE] = { .type = NLA_U8, },
        [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
};

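/* Translate netlink attributes into a fou_cfg; only AF_INET is
 * accepted for the address family here.
 */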
static int parse_nl_config(struct genl_info *info,
                           struct fou_cfg *cfg)
{
        memset(cfg, 0, sizeof(*cfg));

        cfg->udp_config.family = AF_INET;

        if (info->attrs[FOU_ATTR_AF]) {
                u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

                if (family != AF_INET)
                        return -EINVAL;

                cfg->udp_config.family = family;
        }

        if (info->attrs[FOU_ATTR_PORT]) {
                __be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

                cfg->udp_config.local_udp_port = port;
        }

        if (info->attrs[FOU_ATTR_IPPROTO])
                cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

        if (info->attrs[FOU_ATTR_TYPE])
                cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

        if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
                cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

        return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct fou_cfg cfg;
        int err;

        err = parse_nl_config(info, &cfg);
        if (err)
                return err;

        return fou_destroy(net, &cfg);
}

static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
        if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
            nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
            nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
            nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
                return -1;

        if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
                if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
                        return -1;
        return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
                         u32 flags, struct sk_buff *skb, u8 cmd)
{
        void *hdr;

        hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
        if (!hdr)
                return -ENOMEM;

        if (fou_fill_info(fou, skb) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct sk_buff *msg;
        struct fou_cfg cfg;
        struct fou *fout;
        __be16 port;
        int ret;

        ret = parse_nl_config(info, &cfg);
        if (ret)
                return ret;
        port = cfg.udp_config.local_udp_port;
        if (port == 0)
                return -EINVAL;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        ret = -ESRCH;
        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (port == fout->port) {
                        ret = fou_dump_info(fout, info->snd_portid,
                                            info->snd_seq, 0, msg,
                                            info->genlhdr->cmd);
                        break;
                }
        }
        mutex_unlock(&fn->fou_lock);
        if (ret < 0)
                goto out_free;

        return genlmsg_reply(msg, info);

out_free:
        nlmsg_free(msg);
        return ret;
}

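/* Netlink dump callback: walk the per-netns list, using cb->args[0]
 * to resume from the last entry already dumped.
 */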
static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fout;
        int idx = 0, ret;

        mutex_lock(&fn->fou_lock);
        list_for_each_entry(fout, &fn->fou_list, list) {
                if (idx++ < cb->args[0])
                        continue;
                ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
                                    cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                    skb, FOU_CMD_GET);
                if (ret)
                        break;
        }
        mutex_unlock(&fn->fou_lock);

        cb->args[0] = idx;
        return skb->len;
}

static const struct genl_ops fou_nl_ops[] = {
        {
                .cmd = FOU_CMD_ADD,
                .doit = fou_nl_cmd_add_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_DEL,
                .doit = fou_nl_cmd_rm_port,
                .policy = fou_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = FOU_CMD_GET,
                .doit = fou_nl_cmd_get_port,
                .dumpit = fou_nl_dump,
                .policy = fou_nl_policy,
        },
};

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
        return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
        size_t len;
        bool need_priv = false;

        len = sizeof(struct udphdr) + sizeof(struct guehdr);

        if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
                len += GUE_PLEN_REMCSUM;
                need_priv = true;
        }

        len += need_priv ? GUE_LEN_PRIV : 0;

        return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

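/* Push the outer UDP header onto the skb for either encapsulation
 * and set the outer protocol to UDP.
 */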
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
                          struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
        struct udphdr *uh;

        skb_push(skb, sizeof(struct udphdr));
        skb_reset_transport_header(skb);

        uh = udp_hdr(skb);

        uh->dest = e->dport;
        uh->source = sport;
        uh->len = htons(skb->len);
        udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
                     fl4->saddr, fl4->daddr, skb->len);

        *protocol = IPPROTO_UDP;
}

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;
        __be16 sport;

        skb = iptunnel_handle_offloads(skb, type);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);
        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(fou_build_header);

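/* Build the GUE header on the transmit path. When remote checksum
 * offload is used, a private flags word is added, followed by two
 * 16-bit values holding the checksum start and offset relative to
 * the end of the GUE header.
 */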
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                     u8 *protocol, struct flowi4 *fl4)
{
        int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
                                                       SKB_GSO_UDP_TUNNEL;
        struct guehdr *guehdr;
        size_t hdrlen, optlen = 0;
        __be16 sport;
        void *data;
        bool need_priv = false;

        if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
            skb->ip_summed == CHECKSUM_PARTIAL) {
                optlen += GUE_PLEN_REMCSUM;
                type |= SKB_GSO_TUNNEL_REMCSUM;
                need_priv = true;
        }

        optlen += need_priv ? GUE_LEN_PRIV : 0;

        skb = iptunnel_handle_offloads(skb, type);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* Get source port (based on flow hash) before skb_push */
        sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
                                               skb, 0, 0, false);

        hdrlen = sizeof(struct guehdr) + optlen;

        skb_push(skb, hdrlen);

        guehdr = (struct guehdr *)skb->data;

        guehdr->control = 0;
        guehdr->version = 0;
        guehdr->hlen = optlen >> 2;
        guehdr->flags = 0;
        guehdr->proto_ctype = *protocol;

        data = &guehdr[1];

        if (need_priv) {
                __be32 *flags = data;

                guehdr->flags |= GUE_FLAG_PRIV;
                *flags = 0;
                data += GUE_LEN_PRIV;

                if (type & SKB_GSO_TUNNEL_REMCSUM) {
                        u16 csum_start = skb_checksum_start_offset(skb);
                        __be16 *pd = data;

                        if (csum_start < hdrlen)
                                return -EINVAL;

                        csum_start -= hdrlen;
                        pd[0] = htons(csum_start);
                        pd[1] = htons(csum_start + skb->csum_offset);

                        if (!skb_is_gso(skb)) {
                                skb->ip_summed = CHECKSUM_NONE;
                                skb->encapsulation = 0;
                        }

                        *flags |= GUE_PFLAG_REMCSUM;
                        data += GUE_PLEN_REMCSUM;
                }
        }

        fou_build_udp(skb, e, fl4, protocol, sport);

        return 0;
}
EXPORT_SYMBOL(gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
        .encap_hlen = fou_encap_hlen,
        .build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
        .encap_hlen = gue_encap_hlen,
        .build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
        int ret;

        ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        if (ret < 0) {
                pr_err("can't add fou ops\n");
                return ret;
        }

        ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
        if (ret < 0) {
                pr_err("can't add gue ops\n");
                ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
                return ret;
        }

        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
        ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
        ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
        return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static __net_init int fou_init_net(struct net *net)
{
        struct fou_net *fn = net_generic(net, fou_net_id);

        INIT_LIST_HEAD(&fn->fou_list);
        mutex_init(&fn->fou_lock);
        return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
        struct fou_net *fn = net_generic(net, fou_net_id);
        struct fou *fou, *next;

        /* Close all the FOU sockets */
        mutex_lock(&fn->fou_lock);
        list_for_each_entry_safe(fou, next, &fn->fou_list, list)
                fou_release(fou);
        mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
        .init = fou_init_net,
        .exit = fou_exit_net,
        .id   = &fou_net_id,
        .size = sizeof(struct fou_net),
};

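/* Module init: register the per-netns state, the generic netlink
 * family, and the ip_tunnel encap ops, unwinding on failure.
 */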
static int __init fou_init(void)
{
        int ret;

        ret = register_pernet_device(&fou_net_ops);
        if (ret)
                goto exit;

        ret = genl_register_family_with_ops(&fou_nl_family,
                                            fou_nl_ops);
        if (ret < 0)
                goto unregister;

        ret = ip_tunnel_encap_add_fou_ops();
        if (ret == 0)
                return 0;

        genl_unregister_family(&fou_nl_family);
unregister:
        unregister_pernet_device(&fou_net_ops);
exit:
        return ret;
}

static void __exit fou_fini(void)
{
        ip_tunnel_encap_del_fou_ops();
        genl_unregister_family(&fou_nl_family);
        unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");