#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
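
/* Overview (editor's note): Foo-over-UDP (FOU) encapsulates packets of an
 * inner IP protocol directly in a UDP payload, while Generic UDP
 * Encapsulation (GUE) is the variant that prefixes the payload with a small
 * header carrying the inner protocol number and optional fields.  Each
 * configured receive port is represented below by a struct fou bound to a
 * regular kernel UDP socket.
 */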

struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u16 type;
	struct list_head list;
	struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list; /* all fou sockets in this namespace */
	struct mutex fou_lock;     /* guards fou_list */
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static int fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
	return iptunnel_pull_offloads(skb);
}

static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	if (fou_recv_pull(skb, sizeof(struct udphdr)))
		goto drop;

	return -fou->protocol;

drop:
	kfree_skb(skb);
	return 0;
}
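
/* Note on the encap_rcv() return convention used above (see
 * udp_queue_rcv_skb()): a positive value hands the packet to the normal
 * UDP stack, zero means the packet was consumed, and a negative value asks
 * the IP layer to resubmit the packet with protocol number -ret -- which is
 * how returning -fou->protocol dispatches to the inner protocol handler.
 */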

static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}
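
/* Remote checksum offload in brief: the transmitter computes only the outer
 * UDP checksum, and the GUE remcsum option carries a (start, offset) pair
 * locating the inner checksum field.  On receive, skb_remcsum_process() (and
 * skb_gro_remcsum_process() in the GRO path) reconstructs the inner checksum
 * from the packet checksum instead of requiring a second checksum on send.
 */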

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support for control messages yet; drop the packet */
	kfree_skb(skb);
	return 0;
}

static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	hdrlen = sizeof(struct guehdr) + optlen;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now.  This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	if (iptunnel_pull_offloads(skb))
		goto drop;

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}
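
/* Sketch of the GUE header parsed above, assuming the struct guehdr layout
 * from <net/gue.h>: a 4-byte base word (2-bit version, 1 control bit, 5-bit
 * hlen counting optional fields in 32-bit words, 8-bit proto_ctype, and 16
 * flag bits), followed by hlen words of optional fields such as the private
 * flags word (GUE_FLAG_PRIV) and the remcsum (start, offset) pair.
 */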

static struct sk_buff **fou_gro_receive(struct sock *sk,
					struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = fou_from_sock(sk)->protocol;
	const struct net_offload **offloads;

	/* We can clear the encap_mark for FOU as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
			    int nhoff)
{
	const struct net_offload *ops;
	u8 proto = fou_from_sock(sk)->protocol;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

	skb_set_inner_mac_header(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}

static struct sk_buff **gue_gro_receive(struct sock *sk,
					struct sk_buff **head,
					struct sk_buff *skb)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = fou_from_sock(sk);
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	/* We can clear the encap_mark for GUE as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GRE tunnel header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}

static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

	skb_set_inner_mac_header(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou->port == fout->port) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;

	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree_rcu(fou, rcu);
}

static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->flags = cfg->flags;
	fou->port = cfg->udp_config.local_udp_port;
	fou->type = cfg->type;
	fou->sock = sock;

	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.sk_user_data = fou;
	tunnel_cfg.encap_destroy = NULL;

	/* Initialize receive handlers for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		tunnel_cfg.encap_rcv = fou_udp_recv;
		tunnel_cfg.gro_receive = fou_gro_receive;
		tunnel_cfg.gro_complete = fou_gro_complete;
		fou->protocol = cfg->protocol;
		break;
	case FOU_ENCAP_GUE:
		tunnel_cfg.encap_rcv = gue_udp_recv;
		tunnel_cfg.gro_receive = gue_gro_receive;
		tunnel_cfg.gro_complete = gue_gro_complete;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	sk->sk_allocation = GFP_ATOMIC;

	err = fou_add_to_port_list(net, fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	__be16 port = cfg->udp_config.local_udp_port;
	struct fou *fou;
	int err = -EINVAL;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou->port == port) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}

static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT]			= { .type = NLA_U16, },
	[FOU_ATTR_AF]			= { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO]		= { .type = NLA_U8, },
	[FOU_ATTR_TYPE]			= { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG, },
};

static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		__be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}

static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;
	return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;
	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (port == fout->port) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}

static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
		.policy = fou_nl_policy,
	},
};
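
/* These genetlink commands are what the iproute2 "ip fou" subcommand talks
 * to; for illustration, "ip fou add port 5555 gue" creates a GUE receive
 * port and "ip fou del port 5555" removes it again.
 */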

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);
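
/* Worked example: with TUNNEL_ENCAP_FLAG_REMCSUM set the encapsulation
 * overhead is 8 (UDP) + 4 (GUE base) + 4 (GUE_LEN_PRIV) +
 * 4 (GUE_PLEN_REMCSUM) = 20 bytes; without it, just 8 + 4 = 12 bytes.
 */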

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	int err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	return 0;
}
EXPORT_SYMBOL(__fou_build_header);

int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __fou_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);

int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	void *data;
	bool need_priv = false;
	int err;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	/* Get source port (based on flow hash) before skb_push */
	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__gue_build_header);

int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __gue_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);

#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif
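
/* Once these ops are registered, an IP tunnel configured with FOU or GUE
 * encapsulation reaches the build_header callbacks above through
 * ip_tunnel_encap(); for example (iproute2 syntax, for illustration):
 * "ip link add name tun0 type ipip remote <peer> local <addr> \
 *      encap gue encap-sport auto encap-dport 5555".
 */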

static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};

static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");