[NETFILTER]: Introduce NF_INET_ hook values
net/ipv6/raw.c
1 /*
2 * RAW sockets for IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Adapted from linux/net/ipv4/raw.c
9 *
10 * $Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
11 *
12 * Fixes:
13 * Hideaki YOSHIFUJI : sin6_scope_id support
14 * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
15 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
23 #include <linux/errno.h>
24 #include <linux/types.h>
25 #include <linux/socket.h>
26 #include <linux/sockios.h>
27 #include <linux/net.h>
28 #include <linux/in6.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/icmpv6.h>
32 #include <linux/netfilter.h>
33 #include <linux/netfilter_ipv6.h>
34 #include <linux/skbuff.h>
35 #include <asm/uaccess.h>
36 #include <asm/ioctls.h>
37
38 #include <net/net_namespace.h>
39 #include <net/ip.h>
40 #include <net/sock.h>
41 #include <net/snmp.h>
42
43 #include <net/ipv6.h>
44 #include <net/ndisc.h>
45 #include <net/protocol.h>
46 #include <net/ip6_route.h>
47 #include <net/ip6_checksum.h>
48 #include <net/addrconf.h>
49 #include <net/transp_v6.h>
50 #include <net/udp.h>
51 #include <net/inet_common.h>
52 #include <net/tcp_states.h>
53 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
54 #include <net/mip6.h>
55 #endif
56
57 #include <net/rawv6.h>
58 #include <net/xfrm.h>
59
60 #include <linux/proc_fs.h>
61 #include <linux/seq_file.h>
62
63 struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
64 DEFINE_RWLOCK(raw_v6_lock);
65
66 static void raw_v6_hash(struct sock *sk)
67 {
68 struct hlist_head *list = &raw_v6_htable[inet_sk(sk)->num &
69 (RAWV6_HTABLE_SIZE - 1)];
70
71 write_lock_bh(&raw_v6_lock);
72 sk_add_node(sk, list);
73 sock_prot_inc_use(sk->sk_prot);
74 write_unlock_bh(&raw_v6_lock);
75 }
76
77 static void raw_v6_unhash(struct sock *sk)
78 {
79 write_lock_bh(&raw_v6_lock);
80 if (sk_del_node_init(sk))
81 sock_prot_dec_use(sk->sk_prot);
82 write_unlock_bh(&raw_v6_lock);
83 }
84
85
86 /* Grumble... icmp and ip_input want to get at this... */
87 struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
88 struct in6_addr *loc_addr, struct in6_addr *rmt_addr,
89 int dif)
90 {
91 struct hlist_node *node;
92 int is_multicast = ipv6_addr_is_multicast(loc_addr);
93
94 sk_for_each_from(sk, node)
95 if (inet_sk(sk)->num == num) {
96 struct ipv6_pinfo *np = inet6_sk(sk);
97
98 if (!ipv6_addr_any(&np->daddr) &&
99 !ipv6_addr_equal(&np->daddr, rmt_addr))
100 continue;
101
102 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
103 continue;
104
105 if (!ipv6_addr_any(&np->rcv_saddr)) {
106 if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
107 goto found;
108 if (is_multicast &&
109 inet6_mc_check(sk, loc_addr, rmt_addr))
110 goto found;
111 continue;
112 }
113 goto found;
114 }
115 sk = NULL;
116 found:
117 return sk;
118 }
119
120 /*
121 * 0 - deliver
122 * 1 - block
123 */
124 static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
125 {
126 struct icmp6hdr *icmph;
127 struct raw6_sock *rp = raw6_sk(sk);
128
129 if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
130 __u32 *data = &rp->filter.data[0];
131 int bit_nr;
132
133 icmph = (struct icmp6hdr *) skb->data;
134 bit_nr = icmph->icmp6_type;
135
136 return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
137 }
138 return 0;
139 }
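/*
 * Illustrative sketch, not from this file: the bitmap test above is the
 * kernel-side counterpart of the RFC 3542 ICMP6_FILTER_* macros, where a
 * set bit means "block this ICMPv6 type"; an all-zero filter (the default
 * for a fresh socket) passes everything.  Assuming the usual glibc layout
 * of struct icmp6_filter (uint32_t icmp6_filt[8]), an equivalent userspace
 * test would be (helper name made up for illustration):
 *
 *	static int icmp6_type_blocked(const struct icmp6_filter *f,
 *				      unsigned int type)
 *	{
 *		return (f->icmp6_filt[type >> 5] & (1U << (type & 31))) != 0;
 *	}
 */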
140
141 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
142 static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
143
144 int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
145 struct sk_buff *skb))
146 {
147 rcu_assign_pointer(mh_filter, filter);
148 return 0;
149 }
150 EXPORT_SYMBOL(rawv6_mh_filter_register);
151
152 int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
153 struct sk_buff *skb))
154 {
155 rcu_assign_pointer(mh_filter, NULL);
156 synchronize_rcu();
157 return 0;
158 }
159 EXPORT_SYMBOL(rawv6_mh_filter_unregister);
160
161 #endif
162
163 /*
164 * demultiplex raw sockets.
165 * (should consider queueing the skb in the sock receive_queue
166 * without calling rawv6.c)
167 *
168 * Caller owns SKB so we must make clones.
169 */
170 int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
171 {
172 struct in6_addr *saddr;
173 struct in6_addr *daddr;
174 struct sock *sk;
175 int delivered = 0;
176 __u8 hash;
177
178 saddr = &ipv6_hdr(skb)->saddr;
179 daddr = saddr + 1;
180
181 hash = nexthdr & (MAX_INET_PROTOS - 1);
182
183 read_lock(&raw_v6_lock);
184 sk = sk_head(&raw_v6_htable[hash]);
185
186 /*
187 * The first socket found will be delivered after
188 * delivery to transport protocols.
189 */
190
191 if (sk == NULL)
192 goto out;
193
194 sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);
195
196 while (sk) {
197 int filtered;
198
199 delivered = 1;
200 switch (nexthdr) {
201 case IPPROTO_ICMPV6:
202 filtered = icmpv6_filter(sk, skb);
203 break;
204
205 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
206 case IPPROTO_MH:
207 {
208 /* XXX: The MH filter is called here so that the MH
209 * is validated only once for each packet. Ideally it
210 * would run after the xfrm policy check, but that
211 * check lives in rawv6_rcv() because it must be
212 * done for each socket.
213 */
214 int (*filter)(struct sock *sock, struct sk_buff *skb);
215
216 filter = rcu_dereference(mh_filter);
217 filtered = filter ? filter(sk, skb) : 0;
218 break;
219 }
220 #endif
221 default:
222 filtered = 0;
223 break;
224 }
225
226 if (filtered < 0)
227 break;
228 if (filtered == 0) {
229 struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
230
231 /* Not releasing hash table! */
232 if (clone) {
233 nf_reset(clone);
234 rawv6_rcv(sk, clone);
235 }
236 }
237 sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
238 IP6CB(skb)->iif);
239 }
240 out:
241 read_unlock(&raw_v6_lock);
242 return delivered;
243 }
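/*
 * Illustrative sketch, not from this file: because the loop above walks
 * every matching raw socket and queues a clone to each, two processes
 * listening on the same protocol both see a copy of every packet.
 * Assuming the usual POSIX socket API (raw sockets need CAP_NET_RAW):
 *
 *	int a = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int b = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *
 * An incoming ICMPv6 packet is then cloned and queued to both a and b,
 * and is still processed by the normal ICMPv6 protocol handler afterwards.
 */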
244
245 /* This cleans up af_inet6 a bit. -DaveM */
246 static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
247 {
248 struct inet_sock *inet = inet_sk(sk);
249 struct ipv6_pinfo *np = inet6_sk(sk);
250 struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
251 __be32 v4addr = 0;
252 int addr_type;
253 int err;
254
255 if (addr_len < SIN6_LEN_RFC2133)
256 return -EINVAL;
257 addr_type = ipv6_addr_type(&addr->sin6_addr);
258
259 /* Raw sockets are IPv6 only */
260 if (addr_type == IPV6_ADDR_MAPPED)
261 return(-EADDRNOTAVAIL);
262
263 lock_sock(sk);
264
265 err = -EINVAL;
266 if (sk->sk_state != TCP_CLOSE)
267 goto out;
268
269 /* Check if the address belongs to the host. */
270 if (addr_type != IPV6_ADDR_ANY) {
271 struct net_device *dev = NULL;
272
273 if (addr_type & IPV6_ADDR_LINKLOCAL) {
274 if (addr_len >= sizeof(struct sockaddr_in6) &&
275 addr->sin6_scope_id) {
276 /* Override any existing binding, if another
277 * one is supplied by user.
278 */
279 sk->sk_bound_dev_if = addr->sin6_scope_id;
280 }
281
282 /* Binding to link-local address requires an interface */
283 if (!sk->sk_bound_dev_if)
284 goto out;
285
286 dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
287 if (!dev) {
288 err = -ENODEV;
289 goto out;
290 }
291 }
292
293 /* The IPv4 address of the socket is invalid: only the
294 * unspecified and mapped addresses have a v4 equivalent.
295 */
296 v4addr = LOOPBACK4_IPV6;
297 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
298 err = -EADDRNOTAVAIL;
299 if (!ipv6_chk_addr(&addr->sin6_addr, dev, 0)) {
300 if (dev)
301 dev_put(dev);
302 goto out;
303 }
304 }
305 if (dev)
306 dev_put(dev);
307 }
308
309 inet->rcv_saddr = inet->saddr = v4addr;
310 ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
311 if (!(addr_type & IPV6_ADDR_MULTICAST))
312 ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
313 err = 0;
314 out:
315 release_sock(sk);
316 return err;
317 }
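/*
 * Illustrative sketch, not from this file: binding to a link-local
 * address takes the path above that insists on an interface, so the
 * caller must supply sin6_scope_id (or have bound a device already).
 * The interface name and address are placeholders; assumes the usual
 * POSIX headers (<arpa/inet.h>, <net/if.h>, <string.h>, <stdio.h>):
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	struct sockaddr_in6 sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "fe80::1", &sa.sin6_addr);
 *	sa.sin6_scope_id = if_nametoindex("eth0");
 *	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *		perror("bind");
 */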
318
319 void rawv6_err(struct sock *sk, struct sk_buff *skb,
320 struct inet6_skb_parm *opt,
321 int type, int code, int offset, __be32 info)
322 {
323 struct inet_sock *inet = inet_sk(sk);
324 struct ipv6_pinfo *np = inet6_sk(sk);
325 int err;
326 int harderr;
327
328 /* Report an error on the raw socket if:
329 1. the user requested recverr, or
330 2. the socket is connected (otherwise the error indication
331 is useless without recverr and the error is hard).
332 */
333 if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
334 return;
335
336 harderr = icmpv6_err_convert(type, code, &err);
337 if (type == ICMPV6_PKT_TOOBIG)
338 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
339
340 if (np->recverr) {
341 u8 *payload = skb->data;
342 if (!inet->hdrincl)
343 payload += offset;
344 ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
345 }
346
347 if (np->recverr || harderr) {
348 sk->sk_err = err;
349 sk->sk_error_report(sk);
350 }
351 }
352
353 static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
354 {
355 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
356 skb_checksum_complete(skb)) {
357 atomic_inc(&sk->sk_drops);
358 kfree_skb(skb);
359 return 0;
360 }
361
362 /* Charge it to the socket. */
363 if (sock_queue_rcv_skb(sk,skb)<0) {
364 atomic_inc(&sk->sk_drops);
365 kfree_skb(skb);
366 return 0;
367 }
368
369 return 0;
370 }
371
372 /*
373 * This is next to useless...
374 * If we demultiplexed in the network layer we would not need this
375 * extra call just to queue the skb...
376 * Maybe the network layer could pass a hint telling us whether to
377 * call raw_rcv() for demultiplexing.
378 */
379 int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
380 {
381 struct inet_sock *inet = inet_sk(sk);
382 struct raw6_sock *rp = raw6_sk(sk);
383
384 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
385 atomic_inc(&sk->sk_drops);
386 kfree_skb(skb);
387 return NET_RX_DROP;
388 }
389
390 if (!rp->checksum)
391 skb->ip_summed = CHECKSUM_UNNECESSARY;
392
393 if (skb->ip_summed == CHECKSUM_COMPLETE) {
394 skb_postpull_rcsum(skb, skb_network_header(skb),
395 skb_network_header_len(skb));
396 if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
397 &ipv6_hdr(skb)->daddr,
398 skb->len, inet->num, skb->csum))
399 skb->ip_summed = CHECKSUM_UNNECESSARY;
400 }
401 if (!skb_csum_unnecessary(skb))
402 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
403 &ipv6_hdr(skb)->daddr,
404 skb->len,
405 inet->num, 0));
406
407 if (inet->hdrincl) {
408 if (skb_checksum_complete(skb)) {
409 atomic_inc(&sk->sk_drops);
410 kfree_skb(skb);
411 return 0;
412 }
413 }
414
415 rawv6_rcv_skb(sk, skb);
416 return 0;
417 }
418
419
420 /*
421 * This should be easy, if there is something there
422 * we return it, otherwise we block.
423 */
424
425 static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
426 struct msghdr *msg, size_t len,
427 int noblock, int flags, int *addr_len)
428 {
429 struct ipv6_pinfo *np = inet6_sk(sk);
430 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
431 struct sk_buff *skb;
432 size_t copied;
433 int err;
434
435 if (flags & MSG_OOB)
436 return -EOPNOTSUPP;
437
438 if (addr_len)
439 *addr_len=sizeof(*sin6);
440
441 if (flags & MSG_ERRQUEUE)
442 return ipv6_recv_error(sk, msg, len);
443
444 skb = skb_recv_datagram(sk, flags, noblock, &err);
445 if (!skb)
446 goto out;
447
448 copied = skb->len;
449 if (copied > len) {
450 copied = len;
451 msg->msg_flags |= MSG_TRUNC;
452 }
453
454 if (skb_csum_unnecessary(skb)) {
455 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
456 } else if (msg->msg_flags&MSG_TRUNC) {
457 if (__skb_checksum_complete(skb))
458 goto csum_copy_err;
459 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
460 } else {
461 err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
462 if (err == -EINVAL)
463 goto csum_copy_err;
464 }
465 if (err)
466 goto out_free;
467
468 /* Copy the address. */
469 if (sin6) {
470 sin6->sin6_family = AF_INET6;
471 sin6->sin6_port = 0;
472 ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
473 sin6->sin6_flowinfo = 0;
474 sin6->sin6_scope_id = 0;
475 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
476 sin6->sin6_scope_id = IP6CB(skb)->iif;
477 }
478
479 sock_recv_timestamp(msg, sk, skb);
480
481 if (np->rxopt.all)
482 datagram_recv_ctl(sk, msg, skb);
483
484 err = copied;
485 if (flags & MSG_TRUNC)
486 err = skb->len;
487
488 out_free:
489 skb_free_datagram(sk, skb);
490 out:
491 return err;
492
493 csum_copy_err:
494 skb_kill_datagram(sk, skb, flags);
495
496 /* Error for blocking case is chosen to masquerade
497 as some normal condition.
498 */
499 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
500 atomic_inc(&sk->sk_drops);
501 goto out;
502 }
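/*
 * Illustrative sketch, not from this file: unlike IPv4 raw sockets, the
 * data returned here starts at the transport (e.g. ICMPv6) header; the
 * IPv6 header is never included, and sin6_port is always reported as 0.
 * Assuming the usual POSIX socket API:
 *
 *	char buf[2048];
 *	struct sockaddr_in6 from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *
 * buf[0] is then the first byte of the ICMPv6 (or other transport) header.
 */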
503
504 static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
505 struct raw6_sock *rp)
506 {
507 struct sk_buff *skb;
508 int err = 0;
509 int offset;
510 int len;
511 int total_len;
512 __wsum tmp_csum;
513 __sum16 csum;
514
515 if (!rp->checksum)
516 goto send;
517
518 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
519 goto out;
520
521 offset = rp->offset;
522 total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) -
523 skb->data);
524 if (offset >= total_len - 1) {
525 err = -EINVAL;
526 ip6_flush_pending_frames(sk);
527 goto out;
528 }
529
530 /* TODO: we should check the hardware checksum here (miyazawa) */
531 if (skb_queue_len(&sk->sk_write_queue) == 1) {
532 /*
533 * Only one fragment on the socket.
534 */
535 tmp_csum = skb->csum;
536 } else {
537 struct sk_buff *csum_skb = NULL;
538 tmp_csum = 0;
539
540 skb_queue_walk(&sk->sk_write_queue, skb) {
541 tmp_csum = csum_add(tmp_csum, skb->csum);
542
543 if (csum_skb)
544 continue;
545
546 len = skb->len - skb_transport_offset(skb);
547 if (offset >= len) {
548 offset -= len;
549 continue;
550 }
551
552 csum_skb = skb;
553 }
554
555 skb = csum_skb;
556 }
557
558 offset += skb_transport_offset(skb);
559 if (skb_copy_bits(skb, offset, &csum, 2))
560 BUG();
561
562 /* in case cksum was not initialized */
563 if (unlikely(csum))
564 tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));
565
566 csum = csum_ipv6_magic(&fl->fl6_src,
567 &fl->fl6_dst,
568 total_len, fl->proto, tmp_csum);
569
570 if (csum == 0 && fl->proto == IPPROTO_UDP)
571 csum = CSUM_MANGLED_0;
572
573 if (skb_store_bits(skb, offset, &csum, 2))
574 BUG();
575
576 send:
577 err = ip6_push_pending_frames(sk);
578 out:
579 return err;
580 }
581
582 static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
583 struct flowi *fl, struct rt6_info *rt,
584 unsigned int flags)
585 {
586 struct ipv6_pinfo *np = inet6_sk(sk);
587 struct ipv6hdr *iph;
588 struct sk_buff *skb;
589 unsigned int hh_len;
590 int err;
591
592 if (length > rt->u.dst.dev->mtu) {
593 ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
594 return -EMSGSIZE;
595 }
596 if (flags&MSG_PROBE)
597 goto out;
598
599 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
600
601 skb = sock_alloc_send_skb(sk, length+hh_len+15,
602 flags&MSG_DONTWAIT, &err);
603 if (skb == NULL)
604 goto error;
605 skb_reserve(skb, hh_len);
606
607 skb->priority = sk->sk_priority;
608 skb->dst = dst_clone(&rt->u.dst);
609
610 skb_put(skb, length);
611 skb_reset_network_header(skb);
612 iph = ipv6_hdr(skb);
613
614 skb->ip_summed = CHECKSUM_NONE;
615
616 skb->transport_header = skb->network_header;
617 err = memcpy_fromiovecend((void *)iph, from, 0, length);
618 if (err)
619 goto error_fault;
620
621 IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
622 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
623 dst_output);
624 if (err > 0)
625 err = np->recverr ? net_xmit_errno(err) : 0;
626 if (err)
627 goto error;
628 out:
629 return 0;
630
631 error_fault:
632 err = -EFAULT;
633 kfree_skb(skb);
634 error:
635 IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
636 return err;
637 }
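/*
 * Illustrative sketch, not from this file: the NF_HOOK() call above is
 * where a netfilter module registered on PF_INET6 at the NF_INET_LOCAL_OUT
 * hook introduced by this commit would see IPV6_HDRINCL traffic.  A
 * minimal registration, assuming the nf_hook_ops layout and hook function
 * signature of this kernel generation (names below are made up):
 *
 *	static unsigned int raw6_out_hook(unsigned int hooknum,
 *					  struct sk_buff *skb,
 *					  const struct net_device *in,
 *					  const struct net_device *out,
 *					  int (*okfn)(struct sk_buff *))
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static struct nf_hook_ops raw6_out_ops = {
 *		.hook     = raw6_out_hook,
 *		.pf       = PF_INET6,
 *		.hooknum  = NF_INET_LOCAL_OUT,
 *		.priority = NF_IP6_PRI_FIRST,
 *	};
 *
 * The module would then call nf_register_hook(&raw6_out_ops) from its
 * init function.
 */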
638
639 static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
640 {
641 struct iovec *iov;
642 u8 __user *type = NULL;
643 u8 __user *code = NULL;
644 u8 len = 0;
645 int probed = 0;
646 int i;
647
648 if (!msg->msg_iov)
649 return 0;
650
651 for (i = 0; i < msg->msg_iovlen; i++) {
652 iov = &msg->msg_iov[i];
653 if (!iov)
654 continue;
655
656 switch (fl->proto) {
657 case IPPROTO_ICMPV6:
658 /* check if one-byte field is readable or not. */
659 if (iov->iov_base && iov->iov_len < 1)
660 break;
661
662 if (!type) {
663 type = iov->iov_base;
664 /* check if code field is readable or not. */
665 if (iov->iov_len > 1)
666 code = type + 1;
667 } else if (!code)
668 code = iov->iov_base;
669
670 if (type && code) {
671 if (get_user(fl->fl_icmp_type, type) ||
672 get_user(fl->fl_icmp_code, code))
673 return -EFAULT;
674 probed = 1;
675 }
676 break;
677 case IPPROTO_MH:
678 if (iov->iov_base && iov->iov_len < 1)
679 break;
680 /* check if type field is readable or not. */
681 if (iov->iov_len > 2 - len) {
682 u8 __user *p = iov->iov_base;
683 if (get_user(fl->fl_mh_type, &p[2 - len]))
684 return -EFAULT;
685 probed = 1;
686 } else
687 len += iov->iov_len;
688
689 break;
690 default:
691 probed = 1;
692 break;
693 }
694 if (probed)
695 break;
696 }
697 return 0;
698 }
699
700 static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
701 struct msghdr *msg, size_t len)
702 {
703 struct ipv6_txoptions opt_space;
704 struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
705 struct in6_addr *daddr, *final_p = NULL, final;
706 struct inet_sock *inet = inet_sk(sk);
707 struct ipv6_pinfo *np = inet6_sk(sk);
708 struct raw6_sock *rp = raw6_sk(sk);
709 struct ipv6_txoptions *opt = NULL;
710 struct ip6_flowlabel *flowlabel = NULL;
711 struct dst_entry *dst = NULL;
712 struct flowi fl;
713 int addr_len = msg->msg_namelen;
714 int hlimit = -1;
715 int tclass = -1;
716 u16 proto;
717 int err;
718
719 /* Rough check on arithmetic overflow,
720 better check is made in ip6_append_data().
721 */
722 if (len > INT_MAX)
723 return -EMSGSIZE;
724
725 /* Mirror BSD error message compatibility */
726 if (msg->msg_flags & MSG_OOB)
727 return -EOPNOTSUPP;
728
729 /*
730 * Get and verify the address.
731 */
732 memset(&fl, 0, sizeof(fl));
733
734 if (sin6) {
735 if (addr_len < SIN6_LEN_RFC2133)
736 return -EINVAL;
737
738 if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
739 return(-EAFNOSUPPORT);
740
741 /* port is the proto value [0..255] carried in nexthdr */
742 proto = ntohs(sin6->sin6_port);
743
744 if (!proto)
745 proto = inet->num;
746 else if (proto != inet->num)
747 return(-EINVAL);
748
749 if (proto > 255)
750 return(-EINVAL);
751
752 daddr = &sin6->sin6_addr;
753 if (np->sndflow) {
754 fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
755 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
756 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
757 if (flowlabel == NULL)
758 return -EINVAL;
759 daddr = &flowlabel->dst;
760 }
761 }
762
763 /*
764 * Otherwise it will be difficult to maintain
765 * sk->sk_dst_cache.
766 */
767 if (sk->sk_state == TCP_ESTABLISHED &&
768 ipv6_addr_equal(daddr, &np->daddr))
769 daddr = &np->daddr;
770
771 if (addr_len >= sizeof(struct sockaddr_in6) &&
772 sin6->sin6_scope_id &&
773 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
774 fl.oif = sin6->sin6_scope_id;
775 } else {
776 if (sk->sk_state != TCP_ESTABLISHED)
777 return -EDESTADDRREQ;
778
779 proto = inet->num;
780 daddr = &np->daddr;
781 fl.fl6_flowlabel = np->flow_label;
782 }
783
784 if (ipv6_addr_any(daddr)) {
785 /*
786 * unspecified destination address
787 * treated as error... is this correct ?
788 */
789 fl6_sock_release(flowlabel);
790 return(-EINVAL);
791 }
792
793 if (fl.oif == 0)
794 fl.oif = sk->sk_bound_dev_if;
795
796 if (msg->msg_controllen) {
797 opt = &opt_space;
798 memset(opt, 0, sizeof(struct ipv6_txoptions));
799 opt->tot_len = sizeof(struct ipv6_txoptions);
800
801 err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
802 if (err < 0) {
803 fl6_sock_release(flowlabel);
804 return err;
805 }
806 if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
807 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
808 if (flowlabel == NULL)
809 return -EINVAL;
810 }
811 if (!(opt->opt_nflen|opt->opt_flen))
812 opt = NULL;
813 }
814 if (opt == NULL)
815 opt = np->opt;
816 if (flowlabel)
817 opt = fl6_merge_options(&opt_space, flowlabel, opt);
818 opt = ipv6_fixup_options(&opt_space, opt);
819
820 fl.proto = proto;
821 err = rawv6_probe_proto_opt(&fl, msg);
822 if (err)
823 goto out;
824
825 ipv6_addr_copy(&fl.fl6_dst, daddr);
826 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
827 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
828
829 /* merge ip6_build_xmit from ip6_output */
830 if (opt && opt->srcrt) {
831 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
832 ipv6_addr_copy(&final, &fl.fl6_dst);
833 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
834 final_p = &final;
835 }
836
837 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
838 fl.oif = np->mcast_oif;
839 security_sk_classify_flow(sk, &fl);
840
841 err = ip6_dst_lookup(sk, &dst, &fl);
842 if (err)
843 goto out;
844 if (final_p)
845 ipv6_addr_copy(&fl.fl6_dst, final_p);
846
847 if ((err = __xfrm_lookup(&dst, &fl, sk, 1)) < 0) {
848 if (err == -EREMOTE)
849 err = ip6_dst_blackhole(sk, &dst, &fl);
850 if (err < 0)
851 goto out;
852 }
853
854 if (hlimit < 0) {
855 if (ipv6_addr_is_multicast(&fl.fl6_dst))
856 hlimit = np->mcast_hops;
857 else
858 hlimit = np->hop_limit;
859 if (hlimit < 0)
860 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
861 if (hlimit < 0)
862 hlimit = ipv6_get_hoplimit(dst->dev);
863 }
864
865 if (tclass < 0) {
866 tclass = np->tclass;
867 if (tclass < 0)
868 tclass = 0;
869 }
870
871 if (msg->msg_flags&MSG_CONFIRM)
872 goto do_confirm;
873
874 back_from_confirm:
875 if (inet->hdrincl) {
876 err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags);
877 } else {
878 lock_sock(sk);
879 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
880 len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
881 msg->msg_flags);
882
883 if (err)
884 ip6_flush_pending_frames(sk);
885 else if (!(msg->msg_flags & MSG_MORE))
886 err = rawv6_push_pending_frames(sk, &fl, rp);
887 release_sock(sk);
888 }
889 done:
890 dst_release(dst);
891 out:
892 fl6_sock_release(flowlabel);
893 return err<0?err:len;
894 do_confirm:
895 dst_confirm(dst);
896 if (!(msg->msg_flags & MSG_PROBE) || len)
897 goto back_from_confirm;
898 err = 0;
899 goto done;
900 }
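/*
 * Illustrative sketch, not from this file: a minimal ICMPv6 echo request
 * sent through the non-hdrincl path above.  The checksum can be left as
 * zero because rawv6_init_sk() enables checksumming at offset 2 for
 * ICMPv6 sockets, so rawv6_push_pending_frames() fills it in.  Assumes
 * <netinet/icmp6.h>, <arpa/inet.h> and <string.h>; the destination
 * address is a placeholder.
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	struct icmp6_hdr hdr;
 *	struct sockaddr_in6 dst;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.icmp6_type = ICMP6_ECHO_REQUEST;
 *	hdr.icmp6_code = 0;
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *
 *	sendto(fd, &hdr, sizeof(hdr), 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */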
901
902 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
903 char __user *optval, int optlen)
904 {
905 switch (optname) {
906 case ICMPV6_FILTER:
907 if (optlen > sizeof(struct icmp6_filter))
908 optlen = sizeof(struct icmp6_filter);
909 if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
910 return -EFAULT;
911 return 0;
912 default:
913 return -ENOPROTOOPT;
914 }
915
916 return 0;
917 }
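/*
 * Illustrative sketch, not from this file: userspace reaches this code
 * with setsockopt() at level IPPROTO_ICMPV6 using the RFC 3542 macros
 * from <netinet/icmp6.h>, where the option is spelled ICMP6_FILTER:
 *
 *	struct icmp6_filter filt;
 *
 *	ICMP6_FILTER_SETBLOCKALL(&filt);
 *	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
 *	setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
 */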
918
919 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
920 char __user *optval, int __user *optlen)
921 {
922 int len;
923
924 switch (optname) {
925 case ICMPV6_FILTER:
926 if (get_user(len, optlen))
927 return -EFAULT;
928 if (len < 0)
929 return -EINVAL;
930 if (len > sizeof(struct icmp6_filter))
931 len = sizeof(struct icmp6_filter);
932 if (put_user(len, optlen))
933 return -EFAULT;
934 if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
935 return -EFAULT;
936 return 0;
937 default:
938 return -ENOPROTOOPT;
939 }
940
941 return 0;
942 }
943
944
945 static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
946 char __user *optval, int optlen)
947 {
948 struct raw6_sock *rp = raw6_sk(sk);
949 int val;
950
951 if (get_user(val, (int __user *)optval))
952 return -EFAULT;
953
954 switch (optname) {
955 case IPV6_CHECKSUM:
956 /* You may get strange results with a positive odd offset;
957 RFC2292bis agrees with me. */
958 if (val > 0 && (val&1))
959 return(-EINVAL);
960 if (val < 0) {
961 rp->checksum = 0;
962 } else {
963 rp->checksum = 1;
964 rp->offset = val;
965 }
966
967 return 0;
968 break;
969
970 default:
971 return(-ENOPROTOOPT);
972 }
973 }
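/*
 * Illustrative sketch, not from this file: for a raw socket carrying a
 * checksummed protocol of your own, IPV6_CHECKSUM tells the kernel where
 * to store and verify the Internet checksum; positive odd offsets are
 * rejected as above.  Assumes <netinet/in.h>:
 *
 *	int offset = 2;
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset, sizeof(offset));
 *
 * A negative value switches checksumming off again:
 *
 *	offset = -1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset, sizeof(offset));
 */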
974
975 static int rawv6_setsockopt(struct sock *sk, int level, int optname,
976 char __user *optval, int optlen)
977 {
978 switch(level) {
979 case SOL_RAW:
980 break;
981
982 case SOL_ICMPV6:
983 if (inet_sk(sk)->num != IPPROTO_ICMPV6)
984 return -EOPNOTSUPP;
985 return rawv6_seticmpfilter(sk, level, optname, optval,
986 optlen);
987 case SOL_IPV6:
988 if (optname == IPV6_CHECKSUM)
989 break;
990 default:
991 return ipv6_setsockopt(sk, level, optname, optval,
992 optlen);
993 }
994
995 return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
996 }
997
998 #ifdef CONFIG_COMPAT
999 static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
1000 char __user *optval, int optlen)
1001 {
1002 switch (level) {
1003 case SOL_RAW:
1004 break;
1005 case SOL_ICMPV6:
1006 if (inet_sk(sk)->num != IPPROTO_ICMPV6)
1007 return -EOPNOTSUPP;
1008 return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
1009 case SOL_IPV6:
1010 if (optname == IPV6_CHECKSUM)
1011 break;
1012 default:
1013 return compat_ipv6_setsockopt(sk, level, optname,
1014 optval, optlen);
1015 }
1016 return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
1017 }
1018 #endif
1019
1020 static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
1021 char __user *optval, int __user *optlen)
1022 {
1023 struct raw6_sock *rp = raw6_sk(sk);
1024 int val, len;
1025
1026 if (get_user(len,optlen))
1027 return -EFAULT;
1028
1029 switch (optname) {
1030 case IPV6_CHECKSUM:
1031 if (rp->checksum == 0)
1032 val = -1;
1033 else
1034 val = rp->offset;
1035 break;
1036
1037 default:
1038 return -ENOPROTOOPT;
1039 }
1040
1041 len = min_t(unsigned int, sizeof(int), len);
1042
1043 if (put_user(len, optlen))
1044 return -EFAULT;
1045 if (copy_to_user(optval,&val,len))
1046 return -EFAULT;
1047 return 0;
1048 }
1049
1050 static int rawv6_getsockopt(struct sock *sk, int level, int optname,
1051 char __user *optval, int __user *optlen)
1052 {
1053 switch(level) {
1054 case SOL_RAW:
1055 break;
1056
1057 case SOL_ICMPV6:
1058 if (inet_sk(sk)->num != IPPROTO_ICMPV6)
1059 return -EOPNOTSUPP;
1060 return rawv6_geticmpfilter(sk, level, optname, optval,
1061 optlen);
1062 case SOL_IPV6:
1063 if (optname == IPV6_CHECKSUM)
1064 break;
1065 default:
1066 return ipv6_getsockopt(sk, level, optname, optval,
1067 optlen);
1068 }
1069
1070 return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
1071 }
1072
1073 #ifdef CONFIG_COMPAT
1074 static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
1075 char __user *optval, int __user *optlen)
1076 {
1077 switch (level) {
1078 case SOL_RAW:
1079 break;
1080 case SOL_ICMPV6:
1081 if (inet_sk(sk)->num != IPPROTO_ICMPV6)
1082 return -EOPNOTSUPP;
1083 return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
1084 case SOL_IPV6:
1085 if (optname == IPV6_CHECKSUM)
1086 break;
1087 default:
1088 return compat_ipv6_getsockopt(sk, level, optname,
1089 optval, optlen);
1090 }
1091 return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
1092 }
1093 #endif
1094
1095 static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
1096 {
1097 switch(cmd) {
1098 case SIOCOUTQ:
1099 {
1100 int amount = atomic_read(&sk->sk_wmem_alloc);
1101 return put_user(amount, (int __user *)arg);
1102 }
1103 case SIOCINQ:
1104 {
1105 struct sk_buff *skb;
1106 int amount = 0;
1107
1108 spin_lock_bh(&sk->sk_receive_queue.lock);
1109 skb = skb_peek(&sk->sk_receive_queue);
1110 if (skb != NULL)
1111 amount = skb->tail - skb->transport_header;
1112 spin_unlock_bh(&sk->sk_receive_queue.lock);
1113 return put_user(amount, (int __user *)arg);
1114 }
1115
1116 default:
1117 return -ENOIOCTLCMD;
1118 }
1119 }
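/*
 * Illustrative sketch, not from this file: SIOCINQ reports the size of
 * the next queued datagram (not the whole receive queue) and SIOCOUTQ
 * the bytes still held in the send buffer.  Assumes <sys/ioctl.h>,
 * <linux/sockios.h> and <stdio.h>:
 *
 *	int pending;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0)
 *		printf("next datagram: %d bytes\n", pending);
 *	if (ioctl(fd, SIOCOUTQ, &pending) == 0)
 *		printf("unsent/in-flight: %d bytes\n", pending);
 */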
1120
1121 static void rawv6_close(struct sock *sk, long timeout)
1122 {
1123 if (inet_sk(sk)->num == IPPROTO_RAW)
1124 ip6_ra_control(sk, -1, NULL);
1125
1126 sk_common_release(sk);
1127 }
1128
1129 static int rawv6_init_sk(struct sock *sk)
1130 {
1131 struct raw6_sock *rp = raw6_sk(sk);
1132
1133 switch (inet_sk(sk)->num) {
1134 case IPPROTO_ICMPV6:
1135 rp->checksum = 1;
1136 rp->offset = 2;
1137 break;
1138 case IPPROTO_MH:
1139 rp->checksum = 1;
1140 rp->offset = 4;
1141 break;
1142 default:
1143 break;
1144 }
1145 return(0);
1146 }
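/*
 * Illustrative note, not from this file: the default offsets above match
 * the on-the-wire layout of the two protocols.  The ICMPv6 checksum is
 * the third and fourth byte of the header, and the Mobility Header
 * checksum sits at byte offset 4 (RFC 3775).  With <netinet/icmp6.h>,
 * <stddef.h> and <assert.h>, for example:
 *
 *	assert(offsetof(struct icmp6_hdr, icmp6_cksum) == 2);
 */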
1147
1148 DEFINE_PROTO_INUSE(rawv6)
1149
1150 struct proto rawv6_prot = {
1151 .name = "RAWv6",
1152 .owner = THIS_MODULE,
1153 .close = rawv6_close,
1154 .connect = ip6_datagram_connect,
1155 .disconnect = udp_disconnect,
1156 .ioctl = rawv6_ioctl,
1157 .init = rawv6_init_sk,
1158 .destroy = inet6_destroy_sock,
1159 .setsockopt = rawv6_setsockopt,
1160 .getsockopt = rawv6_getsockopt,
1161 .sendmsg = rawv6_sendmsg,
1162 .recvmsg = rawv6_recvmsg,
1163 .bind = rawv6_bind,
1164 .backlog_rcv = rawv6_rcv_skb,
1165 .hash = raw_v6_hash,
1166 .unhash = raw_v6_unhash,
1167 .obj_size = sizeof(struct raw6_sock),
1168 #ifdef CONFIG_COMPAT
1169 .compat_setsockopt = compat_rawv6_setsockopt,
1170 .compat_getsockopt = compat_rawv6_getsockopt,
1171 #endif
1172 REF_PROTO_INUSE(rawv6)
1173 };
1174
1175 #ifdef CONFIG_PROC_FS
1176 struct raw6_iter_state {
1177 int bucket;
1178 };
1179
1180 #define raw6_seq_private(seq) ((struct raw6_iter_state *)(seq)->private)
1181
1182 static struct sock *raw6_get_first(struct seq_file *seq)
1183 {
1184 struct sock *sk;
1185 struct hlist_node *node;
1186 struct raw6_iter_state* state = raw6_seq_private(seq);
1187
1188 for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket)
1189 sk_for_each(sk, node, &raw_v6_htable[state->bucket])
1190 if (sk->sk_family == PF_INET6)
1191 goto out;
1192 sk = NULL;
1193 out:
1194 return sk;
1195 }
1196
1197 static struct sock *raw6_get_next(struct seq_file *seq, struct sock *sk)
1198 {
1199 struct raw6_iter_state* state = raw6_seq_private(seq);
1200
1201 do {
1202 sk = sk_next(sk);
1203 try_again:
1204 ;
1205 } while (sk && sk->sk_family != PF_INET6);
1206
1207 if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) {
1208 sk = sk_head(&raw_v6_htable[state->bucket]);
1209 goto try_again;
1210 }
1211 return sk;
1212 }
1213
1214 static struct sock *raw6_get_idx(struct seq_file *seq, loff_t pos)
1215 {
1216 struct sock *sk = raw6_get_first(seq);
1217 if (sk)
1218 while (pos && (sk = raw6_get_next(seq, sk)) != NULL)
1219 --pos;
1220 return pos ? NULL : sk;
1221 }
1222
1223 static void *raw6_seq_start(struct seq_file *seq, loff_t *pos)
1224 {
1225 read_lock(&raw_v6_lock);
1226 return *pos ? raw6_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1227 }
1228
1229 static void *raw6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1230 {
1231 struct sock *sk;
1232
1233 if (v == SEQ_START_TOKEN)
1234 sk = raw6_get_first(seq);
1235 else
1236 sk = raw6_get_next(seq, v);
1237 ++*pos;
1238 return sk;
1239 }
1240
1241 static void raw6_seq_stop(struct seq_file *seq, void *v)
1242 {
1243 read_unlock(&raw_v6_lock);
1244 }
1245
1246 static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1247 {
1248 struct ipv6_pinfo *np = inet6_sk(sp);
1249 struct in6_addr *dest, *src;
1250 __u16 destp, srcp;
1251
1252 dest = &np->daddr;
1253 src = &np->rcv_saddr;
1254 destp = 0;
1255 srcp = inet_sk(sp)->num;
1256 seq_printf(seq,
1257 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1258 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
1259 i,
1260 src->s6_addr32[0], src->s6_addr32[1],
1261 src->s6_addr32[2], src->s6_addr32[3], srcp,
1262 dest->s6_addr32[0], dest->s6_addr32[1],
1263 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1264 sp->sk_state,
1265 atomic_read(&sp->sk_wmem_alloc),
1266 atomic_read(&sp->sk_rmem_alloc),
1267 0, 0L, 0,
1268 sock_i_uid(sp), 0,
1269 sock_i_ino(sp),
1270 atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
1271 }
1272
1273 static int raw6_seq_show(struct seq_file *seq, void *v)
1274 {
1275 if (v == SEQ_START_TOKEN)
1276 seq_printf(seq,
1277 " sl "
1278 "local_address "
1279 "remote_address "
1280 "st tx_queue rx_queue tr tm->when retrnsmt"
1281 " uid timeout inode drops\n");
1282 else
1283 raw6_sock_seq_show(seq, v, raw6_seq_private(seq)->bucket);
1284 return 0;
1285 }
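/*
 * Illustrative sketch, not from this file: the table produced above is
 * exported as /proc/net/raw6 and can simply be read as text, one line per
 * bound IPv6 raw socket after the header row.  Assumes <stdio.h>:
 *
 *	FILE *f = fopen("/proc/net/raw6", "r");
 *	char line[256];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 *	if (f)
 *		fclose(f);
 */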
1286
1287 static const struct seq_operations raw6_seq_ops = {
1288 .start = raw6_seq_start,
1289 .next = raw6_seq_next,
1290 .stop = raw6_seq_stop,
1291 .show = raw6_seq_show,
1292 };
1293
1294 static int raw6_seq_open(struct inode *inode, struct file *file)
1295 {
1296 return seq_open_private(file, &raw6_seq_ops,
1297 sizeof(struct raw6_iter_state));
1298 }
1299
1300 static const struct file_operations raw6_seq_fops = {
1301 .owner = THIS_MODULE,
1302 .open = raw6_seq_open,
1303 .read = seq_read,
1304 .llseek = seq_lseek,
1305 .release = seq_release_private,
1306 };
1307
1308 int __init raw6_proc_init(void)
1309 {
1310 if (!proc_net_fops_create(&init_net, "raw6", S_IRUGO, &raw6_seq_fops))
1311 return -ENOMEM;
1312 return 0;
1313 }
1314
1315 void raw6_proc_exit(void)
1316 {
1317 proc_net_remove(&init_net, "raw6");
1318 }
1319 #endif /* CONFIG_PROC_FS */