static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;
-static inline int tcp_v6_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb)
+int inet6_csk_bind_conflict(const struct sock *sk,
+ const struct inet_bind_bucket *tb)
{
	const struct sock *sk2;
	const struct hlist_node *node;

	/* We must walk the whole port owner list in this case. -DaveM */
	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
		    (!sk->sk_reuse || !sk2->sk_reuse ||
		     sk2->sk_state == TCP_LISTEN) &&
		    ipv6_rcv_saddr_equal(sk, sk2))
			break;
	}

	return node != NULL;
}
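Dropping the static qualifier is what lets the generic port allocator take this function as a callback. A matching extern declaration would be needed in a shared header; the placement below is an assumption for illustration, not taken from this patch:

	/* Hypothetical header placement, e.g. include/net/inet6_connection_sock.h,
	 * so the generic connection-sock code can use it as a callback: */
	extern int inet6_csk_bind_conflict(const struct sock *sk,
					   const struct inet_bind_bucket *tb);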
-/* Grrr, addr_type already calculated by caller, but I don't want
- * to add some silly "cookie" argument to this method just for that.
- * But it doesn't matter, the recalculation is in the rarest path
- * this function ever takes.
- */
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
- struct inet_bind_hashbucket *head;
- struct inet_bind_bucket *tb;
- struct hlist_node *node;
- int ret;
-
- local_bh_disable();
- if (snum == 0) {
- int low = sysctl_local_port_range[0];
- int high = sysctl_local_port_range[1];
- int remaining = (high - low) + 1;
- int rover;
-
- spin_lock(&tcp_hashinfo.portalloc_lock);
- if (tcp_hashinfo.port_rover < low)
- rover = low;
- else
- rover = tcp_hashinfo.port_rover;
- do { rover++;
- if (rover > high)
- rover = low;
- head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
- spin_lock(&head->lock);
- inet_bind_bucket_for_each(tb, node, &head->chain)
- if (tb->port == rover)
- goto next;
- break;
- next:
- spin_unlock(&head->lock);
- } while (--remaining > 0);
- tcp_hashinfo.port_rover = rover;
- spin_unlock(&tcp_hashinfo.portalloc_lock);
-
- /* Exhausted local port range during search? It is not
- * possible for us to be holding one of the bind hash
- * locks if this test triggers, because if 'remaining'
- * drops to zero, we broke out of the do/while loop at
- * the top level, not from the 'break;' statement.
- */
- ret = 1;
- if (unlikely(remaining <= 0))
- goto fail;
-
- /* OK, here is the one we will use. */
- snum = rover;
- } else {
- head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
- spin_lock(&head->lock);
- inet_bind_bucket_for_each(tb, node, &head->chain)
- if (tb->port == snum)
- goto tb_found;
- }
- tb = NULL;
- goto tb_not_found;
-tb_found:
- if (tb && !hlist_empty(&tb->owners)) {
- if (tb->fastreuse > 0 && sk->sk_reuse &&
- sk->sk_state != TCP_LISTEN) {
- goto success;
- } else {
- ret = 1;
- if (tcp_v6_bind_conflict(sk, tb))
- goto fail_unlock;
- }
- }
-tb_not_found:
- ret = 1;
- if (tb == NULL) {
- tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
- if (tb == NULL)
- goto fail_unlock;
- }
- if (hlist_empty(&tb->owners)) {
- if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
- tb->fastreuse = 1;
- else
- tb->fastreuse = 0;
- } else if (tb->fastreuse &&
- (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
- tb->fastreuse = 0;
-
-success:
- if (!inet_csk(sk)->icsk_bind_hash)
- inet_bind_hash(sk, tb, snum);
- BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
- ret = 0;
-
-fail_unlock:
- spin_unlock(&head->lock);
-fail:
- local_bh_enable();
- return ret;
-}
-
-static __inline__ void __tcp_v6_hash(struct sock *sk)
-{
- struct hlist_head *list;
- rwlock_t *lock;
-
- BUG_TRAP(sk_unhashed(sk));
-
- if (sk->sk_state == TCP_LISTEN) {
- list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
- lock = &tcp_hashinfo.lhash_lock;
- inet_listen_wlock(&tcp_hashinfo);
- } else {
- unsigned int hash;
- sk->sk_hash = hash = inet6_sk_ehashfn(sk);
- hash &= (tcp_hashinfo.ehash_size - 1);
- list = &tcp_hashinfo.ehash[hash].chain;
- lock = &tcp_hashinfo.ehash[hash].lock;
- write_lock(lock);
- }
-
- __sk_add_node(sk, list);
- sock_prot_inc_use(sk->sk_prot);
- write_unlock(lock);
+ return inet_csk_get_port(&tcp_hashinfo, sk, snum,
+ inet6_csk_bind_conflict);
}
-
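All of the port-allocation logic deleted above now lives in the generic inet_csk_get_port(), which invokes the supplied conflict callback exactly where tcp_v6_bind_conflict() used to be called. A simplified sketch of that shared path, reconstructed from the deleted lines rather than quoted from net/ipv4/inet_connection_sock.c:

	/* Sketch: inside inet_csk_get_port(), with bind_conflict ==
	 * inet6_csk_bind_conflict when called from tcp_v6_get_port(). */
	if (tb && !hlist_empty(&tb->owners)) {
		if (tb->fastreuse > 0 && sk->sk_reuse &&
		    sk->sk_state != TCP_LISTEN)
			goto success;
		if (bind_conflict(sk, tb))
			goto fail_unlock;
	}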
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
-		__tcp_v6_hash(sk);
+		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}
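The __inet6_hash() used here is, in essence, the deleted __tcp_v6_hash() with the hash table passed in explicitly; callers still run it with bottom halves disabled, hence the local_bh_disable()/local_bh_enable() pair above. Assuming it mirrors the deleted code one-for-one, its shape is:

	/* Presumed body of __inet6_hash(): the removed __tcp_v6_hash()
	 * with tcp_hashinfo replaced by the hashinfo argument. */
	static inline void __inet6_hash(struct inet_hashinfo *hashinfo,
					struct sock *sk)
	{
		struct hlist_head *list;
		rwlock_t *lock;

		BUG_TRAP(sk_unhashed(sk));

		if (sk->sk_state == TCP_LISTEN) {
			list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
			lock = &hashinfo->lhash_lock;
			inet_listen_wlock(hashinfo);
		} else {
			unsigned int hash;
			sk->sk_hash = hash = inet6_sk_ehashfn(sk);
			hash &= (hashinfo->ehash_size - 1);
			list = &hashinfo->ehash[hash].chain;
			lock = &hashinfo->ehash[hash].lock;
			write_lock(lock);
		}

		__sk_add_node(sk, list);
		sock_prot_inc_use(sk->sk_prot);
		write_unlock(lock);
	}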
}
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->sport = htons(port);
- __tcp_v6_hash(sk);
+ __inet6_hash(&tcp_hashinfo, sk);
}
spin_unlock(&head->lock);
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- __tcp_v6_hash(sk);
+ __inet6_hash(&tcp_hashinfo, sk);
spin_unlock_bh(&head->lock);
return 0;
} else {
/* sk = NULL, but it is safe for now. RST socket required. */
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
- if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0)
+ if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
+ ip6_xmit(NULL, buff, &fl, NULL, 0);
+ TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
return;
-
- ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
- TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
- return;
+ }
}
kfree_skb(buff);
fl.fl_ip_sport = t1->source;
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
- if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0)
+ if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
+ ip6_xmit(NULL, buff, &fl, NULL, 0);
+ TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
return;
- ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
- return;
+ }
}
kfree_skb(buff);
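Both the RST and ACK paths above get the same fix: the old code returned early when xfrm_lookup() failed, leaking buff. The restructured flow transmits only on success, so every failure path falls through to a single kfree_skb(). Reduced to its shape:

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(NULL, buff, &fl, NULL, 0);
			/* ... bump the relevant TCP MIB counters ... */
			return;		/* buff is now owned by the stack */
		}
	}
	kfree_skb(buff);		/* every failure path lands here */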
newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
- __tcp_v6_hash(newsk);
+ __inet6_hash(&tcp_hashinfo, newsk);
inet_inherit_port(&tcp_hashinfo, sk, newsk);
return newsk;
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_HW) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,skb->csum))
+ &skb->nh.ipv6h->daddr,skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
- LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v6 csum failed\n");
+ }
}
+
+ skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
+ &skb->nh.ipv6h->daddr, 0);
+
if (skb->len <= 76) {
- if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,skb_checksum(skb, 0, skb->len, 0)))
- return -1;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,0);
+ return __skb_checksum_complete(skb);
}
return 0;
}
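The initializer now seeds skb->csum with the complemented pseudo-header sum in every case and, for short packets, defers full software verification to the generic helper. As relied on here, __skb_checksum_complete() checksums the payload against that seed, returns zero when the packet is intact (marking it CHECKSUM_UNNECESSARY), and returns the nonzero folded sum otherwise; approximately (a sketch of the helper's contract, not a quotation of its body):

	/* Approximate behaviour of __skb_checksum_complete(): fold a
	 * software checksum of the data into the pseudo-header seed
	 * left in skb->csum; zero means the packet checks out. */
	unsigned int sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len,
						       skb->csum));
	if (!sum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	return sum;

This nonzero-on-failure convention is also why the receive-path test below drops the old < 0 comparison.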
goto discard_it;
if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
- tcp_v6_checksum_init(skb) < 0))
+ tcp_v6_checksum_init(skb)))
goto bad_packet;
th = skb->h.th;