/*
 *	IPv6 BSD socket options interface
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_sockglue.c
 *
 *	$Id: ipv6_sockglue.c,v 1.41 2002/02/01 22:01:04 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	FIXME: Make the setsockopt code POSIX compliant: That is
 *
 *	o Return -EINVAL for setsockopt of short lengths
 *	o Truncate getsockopt returns
 *	o Return an optlen of the truncated length if need be
 *
 *	David L Stevens <dlstevens@us.ibm.com>:
 *		- added multicast source filtering API for MLDv2
 */
28 #include <linux/module.h>
29 #include <linux/config.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/sched.h>
35 #include <linux/net.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/if_arp.h>
39 #include <linux/init.h>
40 #include <linux/sysctl.h>
41 #include <linux/netfilter.h>
46 #include <net/ndisc.h>
47 #include <net/protocol.h>
48 #include <net/transp_v6.h>
49 #include <net/ip6_route.h>
50 #include <net/addrconf.h>
51 #include <net/inet_common.h>
56 #include <asm/uaccess.h>
58 DEFINE_SNMP_STAT(struct ipstats_mib
, ipv6_statistics
) __read_mostly
;
60 static struct packet_type ipv6_packet_type
= {
61 .type
= __constant_htons(ETH_P_IPV6
),
/* Head of the global Router Alert listener list (see ip6_ra_control). */
struct ip6_ra_chain *ip6_ra_chain;
/* Serializes all additions/removals on ip6_ra_chain (taken _bh). */
DEFINE_RWLOCK(ip6_ra_lock);
/*
 * ip6_ra_control() - register or unregister a Router Alert listener.
 * For sel >= 0 a new chain link is allocated and sk is added to
 * ip6_ra_chain; the supplied destructor (may be NULL) is recorded on the
 * new entry.  sel < 0 presumably removes the socket — the removal path is
 * not visible in this fragment; confirm against the full file.
 * NOTE(review): this source fragment is truncated mid-function — the list
 * search/insert/remove statements and every return path are missing, so
 * only the surviving lines are annotated below.
 */
68 int ip6_ra_control(struct sock
*sk
, int sel
, void (*destructor
)(struct sock
*))
70 struct ip6_ra_chain
*ra
, *new_ra
, **rap
;
72 /* RA packet may be delivered ONLY to IPPROTO_RAW socket */
73 if (sk
->sk_type
!= SOCK_RAW
|| inet_sk(sk
)->num
!= IPPROTO_RAW
)
/* Pre-allocate the new link outside the lock; only needed when adding. */
76 new_ra
= (sel
>=0) ? kmalloc(sizeof(*new_ra
), GFP_KERNEL
) : NULL
;
78 write_lock_bh(&ip6_ra_lock
);
/* Walk the chain looking for an existing entry for this socket. */
79 for (rap
= &ip6_ra_chain
; (ra
=*rap
) != NULL
; rap
= &ra
->next
) {
82 write_unlock_bh(&ip6_ra_lock
);
89 write_unlock_bh(&ip6_ra_lock
);
99 write_unlock_bh(&ip6_ra_lock
);
/* Record the caller's teardown hook on the freshly inserted entry. */
104 new_ra
->destructor
= destructor
;
108 write_unlock_bh(&ip6_ra_lock
);
/*
 * ipv6_setsockopt() - setsockopt() handler for AF_INET6 sockets.
 * SOL_IP requests on non-raw sockets are forwarded to udp_prot.setsockopt;
 * SOL_IPV6 options are handled per optname.  Default result is
 * -ENOPROTOOPT for unknown options.
 * NOTE(review): this fragment is heavily truncated — the switch head,
 * most case labels, break/return statements and the lock/unlock pairing
 * around several branches are missing; annotations below describe only
 * the surviving lines and are hedged where the context is gone.
 */
112 int ipv6_setsockopt(struct sock
*sk
, int level
, int optname
,
113 char __user
*optval
, int optlen
)
115 struct ipv6_pinfo
*np
= inet6_sk(sk
);
117 int retv
= -ENOPROTOOPT
;
/* Compat path: SOL_IP on a non-raw IPv6 socket goes to the IPv4 code. */
119 if (level
== SOL_IP
&& sk
->sk_type
!= SOCK_RAW
)
120 return udp_prot
.setsockopt(sk
, level
, optname
, optval
, optlen
)
;
/* Fetch the integer option value from userspace. */
127 else if (get_user(val
, (int __user
*) optval
))
/*
 * Presumably the IPV6_ADDRFORM case (label not visible): convert a
 * mapped-IPv6 TCP/UDP socket back to a pure AF_INET socket — swaps
 * sk_prot/ops to the IPv4 equivalents and drops IPv6-only state.
 */
137 if (val
== PF_INET
) {
138 struct ipv6_txoptions
*opt
;
139 struct sk_buff
*pktopt
;
/* Only connected UDP/TCP sockets may change address family. */
141 if (sk
->sk_protocol
!= IPPROTO_UDP
&&
142 sk
->sk_protocol
!= IPPROTO_TCP
)
145 if (sk
->sk_state
!= TCP_ESTABLISHED
) {
/* Peer must be a v4-mapped address, and the socket not v6-only. */
150 if (ipv6_only_sock(sk
) ||
151 !(ipv6_addr_type(&np
->daddr
) & IPV6_ADDR_MAPPED
)) {
152 retv
= -EADDRNOTAVAIL
;
/* Drop IPv6-only state: flow labels and multicast memberships. */
156 fl6_free_socklist(sk
);
157 ipv6_sock_mc_close(sk
);
160 * Sock is moving from IPv6 to IPv4 (sk_prot), so
161 * remove it from the refcnt debug socks count in the
164 sk_refcnt_debug_dec(sk
);
/* TCP: switch to the IPv4 protocol ops and refresh the MSS. */
166 if (sk
->sk_protocol
== IPPROTO_TCP
) {
167 struct tcp_sock
*tp
= tcp_sk(sk
);
170 sock_prot_dec_use(sk
->sk_prot
);
171 sock_prot_inc_use(&tcp_prot
);
173 sk
->sk_prot
= &tcp_prot
;
174 tp
->af_specific
= &ipv4_specific
;
175 sk
->sk_socket
->ops
= &inet_stream_ops
;
176 sk
->sk_family
= PF_INET
;
177 tcp_sync_mss(sk
, tp
->pmtu_cookie
);
/* UDP branch (else-arm; the else itself is not visible here). */
180 sock_prot_dec_use(sk
->sk_prot
);
181 sock_prot_inc_use(&udp_prot
);
183 sk
->sk_prot
= &udp_prot
;
184 sk
->sk_socket
->ops
= &inet_dgram_ops
;
185 sk
->sk_family
= PF_INET
;
/* Detach and free sticky options / queued pktoptions. */
187 opt
= xchg(&np
->opt
, NULL
);
189 sock_kfree_s(sk
, opt
, opt
->tot_len
);
190 pktopt
= xchg(&np
->pktoptions
, NULL
);
194 sk
->sk_destruct
= inet_sock_destruct
;
196 * ... and add it to the refcnt debug socks count
197 * in the new family. -acme
199 sk_refcnt_debug_inc(sk
);
200 module_put(THIS_MODULE
);
/* IPV6_V6ONLY (label missing): only before the socket is bound. */
207 if (inet_sk(sk
)->num
)
209 np
->ipv6only
= valbool
;
/*
 * rxopt bit toggles — each assignment belongs to a receive-ancillary-
 * data option (RECVPKTINFO/HOPLIMIT/RTHDR/HOPOPTS/DSTOPTS/FLOWINFO);
 * the case labels themselves are missing from this fragment.
 */
214 np
->rxopt
.bits
.rxinfo
= valbool
;
219 np
->rxopt
.bits
.rxhlim
= valbool
;
224 if (val
< 0 || val
> 2)
226 np
->rxopt
.bits
.srcrt
= val
;
231 np
->rxopt
.bits
.hopopts
= valbool
;
236 np
->rxopt
.bits
.dstopts
= valbool
;
241 np
->rxopt
.bits
.rxflow
= valbool
;
/* Parse a user-supplied cmsg blob into sticky tx options. */
245 case IPV6_PKTOPTIONS
:
247 struct ipv6_txoptions
*opt
= NULL
;
252 fl
.fl6_flowlabel
= 0;
253 fl
.oif
= sk
->sk_bound_dev_if
;
258 /* 1K is probably excessive
259 * 1K is surely not enough, 2K per standard header is 16K.
262 if (optlen
> 64*1024)
265 opt
= sock_kmalloc(sk
, sizeof(*opt
) + optlen
, GFP_KERNEL
);
270 memset(opt
, 0, sizeof(*opt
));
271 opt
->tot_len
= sizeof(*opt
) + optlen
;
/* Option data lives directly after the header struct (opt+1). */
273 if (copy_from_user(opt
+1, optval
, optlen
))
276 msg
.msg_controllen
= optlen
;
277 msg
.msg_control
= (void*)(opt
+1);
279 retv
= datagram_send_ctl(&msg
, &fl
, opt
, &junk
);
/* TCP needs its ext header length / MSS refreshed for new options. */
284 if (sk
->sk_type
== SOCK_STREAM
) {
286 struct tcp_sock
*tp
= tcp_sk(sk
);
287 if (!((1 << sk
->sk_state
) &
288 (TCPF_LISTEN
| TCPF_CLOSE
))
289 && inet_sk(sk
)->daddr
!= LOOPBACK4_IPV6
) {
290 tp
->ext_header_len
= opt
->opt_flen
+ opt
->opt_nflen
;
291 tcp_sync_mss(sk
, tp
->pmtu_cookie
);
294 opt
= xchg(&np
->opt
, opt
);
/* Non-TCP path swaps options under sk_dst_lock instead. */
297 write_lock(&sk
->sk_dst_lock
);
298 opt
= xchg(&np
->opt
, opt
);
299 write_unlock(&sk
->sk_dst_lock
);
/* Free whichever old option block the xchg returned. */
305 sock_kfree_s(sk
, opt
, opt
->tot_len
);
308 case IPV6_UNICAST_HOPS
:
309 if (val
> 255 || val
< -1)
315 case IPV6_MULTICAST_HOPS
:
316 if (sk
->sk_type
== SOCK_STREAM
)
318 if (val
> 255 || val
< -1)
320 np
->mcast_hops
= val
;
324 case IPV6_MULTICAST_LOOP
:
325 np
->mc_loop
= valbool
;
329 case IPV6_MULTICAST_IF
:
330 if (sk
->sk_type
== SOCK_STREAM
)
/* Must agree with an existing device binding, and the ifindex exists. */
332 if (sk
->sk_bound_dev_if
&& sk
->sk_bound_dev_if
!= val
)
335 if (__dev_get_by_index(val
) == NULL
) {
/* Plain multicast group join/leave (struct ipv6_mreq). */
342 case IPV6_ADD_MEMBERSHIP
:
343 case IPV6_DROP_MEMBERSHIP
:
345 struct ipv6_mreq mreq
;
348 if (copy_from_user(&mreq
, optval
, sizeof(struct ipv6_mreq
)))
351 if (optname
== IPV6_ADD_MEMBERSHIP
)
352 retv
= ipv6_sock_mc_join(sk
, mreq
.ipv6mr_ifindex
, &mreq
.ipv6mr_multiaddr
);
354 retv
= ipv6_sock_mc_drop(sk
, mreq
.ipv6mr_ifindex
, &mreq
.ipv6mr_multiaddr
);
/* Anycast join/leave reuses ipv6_mreq with the acaddr member. */
357 case IPV6_JOIN_ANYCAST
:
358 case IPV6_LEAVE_ANYCAST
:
360 struct ipv6_mreq mreq
;
362 if (optlen
!= sizeof(struct ipv6_mreq
))
366 if (copy_from_user(&mreq
, optval
, sizeof(struct ipv6_mreq
)))
369 if (optname
== IPV6_JOIN_ANYCAST
)
370 retv
= ipv6_sock_ac_join(sk
, mreq
.ipv6mr_ifindex
, &mreq
.ipv6mr_acaddr
);
372 retv
= ipv6_sock_ac_drop(sk
, mreq
.ipv6mr_ifindex
, &mreq
.ipv6mr_acaddr
);
/* Protocol-independent group join/leave (struct group_req). */
375 case MCAST_JOIN_GROUP
:
376 case MCAST_LEAVE_GROUP
:
378 struct group_req greq
;
379 struct sockaddr_in6
*psin6
;
382 if (copy_from_user(&greq
, optval
, sizeof(struct group_req
)))
384 if (greq
.gr_group
.ss_family
!= AF_INET6
) {
385 retv
= -EADDRNOTAVAIL
;
388 psin6
= (struct sockaddr_in6
*)&greq
.gr_group
;
389 if (optname
== MCAST_JOIN_GROUP
)
390 retv
= ipv6_sock_mc_join(sk
, greq
.gr_interface
,
393 retv
= ipv6_sock_mc_drop(sk
, greq
.gr_interface
,
/* MLDv2 source-specific membership / filter-mode changes. */
397 case MCAST_JOIN_SOURCE_GROUP
:
398 case MCAST_LEAVE_SOURCE_GROUP
:
399 case MCAST_BLOCK_SOURCE
:
400 case MCAST_UNBLOCK_SOURCE
:
402 struct group_source_req greqs
;
405 if (optlen
!= sizeof(struct group_source_req
))
407 if (copy_from_user(&greqs
, optval
, sizeof(greqs
))) {
411 if (greqs
.gsr_group
.ss_family
!= AF_INET6
||
412 greqs
.gsr_source
.ss_family
!= AF_INET6
) {
413 retv
= -EADDRNOTAVAIL
;
416 if (optname
== MCAST_BLOCK_SOURCE
) {
417 omode
= MCAST_EXCLUDE
;
419 } else if (optname
== MCAST_UNBLOCK_SOURCE
) {
420 omode
= MCAST_EXCLUDE
;
422 } else if (optname
== MCAST_JOIN_SOURCE_GROUP
) {
423 struct sockaddr_in6
*psin6
;
425 psin6
= (struct sockaddr_in6
*)&greqs
.gsr_group
;
426 retv
= ipv6_sock_mc_join(sk
, greqs
.gsr_interface
,
428 /* prior join w/ different source is ok */
429 if (retv
&& retv
!= -EADDRINUSE
)
431 omode
= MCAST_INCLUDE
;
433 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
434 omode
= MCAST_INCLUDE
;
437 retv
= ip6_mc_source(add
, omode
, sk
, &greqs
);
/* Presumably MCAST_MSFILTER (label missing): install a whole filter. */
442 extern int sysctl_mld_max_msf
;
443 struct group_filter
*gsf
;
445 if (optlen
< GROUP_FILTER_SIZE(0))
447 if (optlen
> sysctl_optmem_max
) {
451 gsf
= (struct group_filter
*)kmalloc(optlen
,GFP_KERNEL
);
457 if (copy_from_user(gsf
, optval
, optlen
)) {
461 /* numsrc >= (4G-140)/128 overflow in 32 bits */
462 if (gsf
->gf_numsrc
>= 0x1ffffffU
||
463 gsf
->gf_numsrc
> sysctl_mld_max_msf
) {
/* Claimed source count must actually fit inside optlen. */
468 if (GROUP_FILTER_SIZE(gsf
->gf_numsrc
) > optlen
) {
473 retv
= ip6_mc_msfilter(sk
, gsf
);
478 case IPV6_ROUTER_ALERT
:
479 retv
= ip6_ra_control(sk
, val
, NULL
);
481 case IPV6_MTU_DISCOVER
:
/* IPV6_MTU (label missing): reject values below the IPv6 minimum MTU. */
488 if (val
&& val
< IPV6_MIN_MTU
)
/* IPV6_RECVERR (label missing): disabling it purges queued errors. */
494 np
->recverr
= valbool
;
496 skb_queue_purge(&sk
->sk_error_queue
);
499 case IPV6_FLOWINFO_SEND
:
500 np
->sndflow
= valbool
;
503 case IPV6_FLOWLABEL_MGR
:
504 retv
= ipv6_flowlabel_opt(sk
, optval
, optlen
);
506 case IPV6_IPSEC_POLICY
:
507 case IPV6_XFRM_POLICY
:
509 if (!capable(CAP_NET_ADMIN
))
511 retv
= xfrm_user_policy(sk
, optname
, optval
, optlen
);
/* Unknown options fall through to netfilter when it is configured. */
514 #ifdef CONFIG_NETFILTER
516 retv
= nf_setsockopt(sk
, PF_INET6
, optname
, optval
,
/*
 * ipv6_getsockopt() - getsockopt() handler for AF_INET6 sockets.
 * SOL_IP on non-raw sockets is forwarded to udp_prot.getsockopt; SOL_IPV6
 * options are answered from ipv6_pinfo state, most of them as a single
 * int copied back with a (possibly truncated) length.
 * NOTE(review): this fragment is heavily truncated — the switch head,
 * many case labels and break/return statements are missing; the notes
 * below cover only the surviving lines.
 */
532 int ipv6_getsockopt(struct sock
*sk
, int level
, int optname
,
533 char __user
*optval
, int __user
*optlen
)
535 struct ipv6_pinfo
*np
= inet6_sk(sk
);
/* Compat path: SOL_IP on a non-raw IPv6 socket goes to the IPv4 code. */
539 if (level
== SOL_IP
&& sk
->sk_type
!= SOCK_RAW
)
540 return udp_prot
.getsockopt(sk
, level
, optname
, optval
, optlen
)
;
543 if (get_user(len
, optlen
))
/* Presumably the MCAST_MSFILTER branch: only UDP/TCP, and connected. */
547 if (sk
->sk_protocol
!= IPPROTO_UDP
&&
548 sk
->sk_protocol
!= IPPROTO_TCP
)
550 if (sk
->sk_state
!= TCP_ESTABLISHED
)
556 struct group_filter gsf
;
559 if (len
< GROUP_FILTER_SIZE(0))
561 if (copy_from_user(&gsf
, optval
, GROUP_FILTER_SIZE(0)))
564 err
= ip6_mc_msfget(sk
, &gsf
,
565 (struct group_filter __user
*)optval
, optlen
)
;
/* Replay stored packet options to userspace as a cmsg stream. */
570 case IPV6_PKTOPTIONS
:
575 if (sk
->sk_type
!= SOCK_STREAM
)
578 msg
.msg_control
= optval
;
579 msg
.msg_controllen
= len
;
583 skb
= np
->pktoptions
;
585 atomic_inc(&skb
->users
);
589 int err
= datagram_recv_ctl(sk
, &msg
, skb
);
/* No queued skb: synthesize requested cmsgs from socket state. */
594 if (np
->rxopt
.bits
.rxinfo
) {
595 struct in6_pktinfo src_info
;
596 src_info
.ipi6_ifindex
= np
->mcast_oif
;
597 ipv6_addr_copy(&src_info
.ipi6_addr
, &np
->daddr
);
598 put_cmsg(&msg
, SOL_IPV6
, IPV6_PKTINFO
, sizeof(src_info
), &src_info
);
600 if (np
->rxopt
.bits
.rxhlim
) {
601 int hlim
= np
->mcast_hops
;
602 put_cmsg(&msg
, SOL_IPV6
, IPV6_HOPLIMIT
, sizeof(hlim
), &hlim
);
/* Report how much control data was actually written. */
605 len
-= msg
.msg_controllen
;
606 return put_user(len
, optlen
)
;
/* Presumably IPV6_MTU (label missing): read the MTU off the dst entry. */
610 struct dst_entry
*dst
;
613 dst
= sk_dst_get(sk
);
/* rxopt flag reads — case labels are missing from this fragment. */
629 val
= np
->rxopt
.bits
.rxinfo
;
633 val
= np
->rxopt
.bits
.rxhlim
;
637 val
= np
->rxopt
.bits
.srcrt
;
641 val
= np
->rxopt
.bits
.hopopts
;
645 val
= np
->rxopt
.bits
.dstopts
;
649 val
= np
->rxopt
.bits
.rxflow
;
652 case IPV6_UNICAST_HOPS
:
656 case IPV6_MULTICAST_HOPS
:
657 val
= np
->mcast_hops
;
660 case IPV6_MULTICAST_LOOP
:
664 case IPV6_MULTICAST_IF
:
668 case IPV6_MTU_DISCOVER
:
676 case IPV6_FLOWINFO_SEND
:
/* Unknown options fall through to netfilter when it is configured. */
681 #ifdef CONFIG_NETFILTER
683 val
= nf_getsockopt(sk
, PF_INET6
, optname
, optval
,
687 val
= put_user(len
, optlen
)
;
/* Common exit: truncate to sizeof(int) and copy value + length back. */
693 len
= min_t(unsigned int, sizeof(int), len
);
694 if(put_user(len
, optlen
))
696 if(copy_to_user(optval
,&val
,len
))
701 void __init
ipv6_packet_init(void)
703 dev_add_pack(&ipv6_packet_type
);
706 void ipv6_packet_cleanup(void)
708 dev_remove_pack(&ipv6_packet_type
);