[INET]: Move tcp_port_rover to inet_hashinfo
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index eea1a17a9ac2aaaf92abe4a3ab1f4496eef5a64e..f29e2f6ebe1bffa2839a1daee8c8b4cad8e7ad61 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -60,12 +60,11 @@ int tcp_tw_count;
 /* Must be called with locally disabled BHs. */
 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
 {
-       struct tcp_ehash_bucket *ehead;
-       struct tcp_bind_hashbucket *bhead;
-       struct tcp_bind_bucket *tb;
-
+       struct inet_bind_hashbucket *bhead;
+       struct inet_bind_bucket *tb;
        /* Unlink from established hashes. */
-       ehead = &tcp_ehash[tw->tw_hashent];
+       struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[tw->tw_hashent];
+
        write_lock(&ehead->lock);
        if (hlist_unhashed(&tw->tw_node)) {
                write_unlock(&ehead->lock);
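The point of this series is visible in the hunk above: per-protocol globals such as tcp_ehash, tcp_bhash and the tcp_port_rover of the commit title collapse into a single tcp_hashinfo object. A trimmed sketch of the consolidated structure, assuming the layout of this kernel generation (the real struct also carries the listening hash, its lock and wait queue):

struct inet_hashinfo {
	struct inet_ehash_bucket	*ehash;			/* established + TIME_WAIT */
	struct inet_bind_hashbucket	*bhash;			/* bound local ports */
	int				ehash_size;
	int				bhash_size;
	kmem_cache_t			*bind_bucket_cachep;	/* slab for inet_bind_bucket */
	int				port_rover;		/* was the tcp_port_rover global */
};

extern struct inet_hashinfo tcp_hashinfo;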
@@ -76,15 +75,15 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
        write_unlock(&ehead->lock);
 
        /* Disassociate with bind bucket. */
-       bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
+       bhead = &tcp_hashinfo.bhash[inet_bhashfn(tw->tw_num, tcp_hashinfo.bhash_size)];
        spin_lock(&bhead->lock);
        tb = tw->tw_tb;
        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
-       tcp_bucket_destroy(tb);
+       inet_bind_bucket_destroy(tcp_hashinfo.bind_bucket_cachep, tb);
        spin_unlock(&bhead->lock);
 
-#ifdef INET_REFCNT_DEBUG
+#ifdef SOCK_REFCNT_DEBUG
        if (atomic_read(&tw->tw_refcnt) != 1) {
                printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
                       atomic_read(&tw->tw_refcnt));
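inet_bhashfn() replaces tcp_bhashfn(), which read the table size from a global; the generalized form takes it as a parameter so other protocols can share the code. In this era it is a simple mask, which assumes bhash_size is a power of two (the boot-time allocator sizes it that way):

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}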
@@ -296,17 +295,17 @@ kill:
  */
 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 {
-       struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
-       struct tcp_bind_hashbucket *bhead;
-
+       const struct inet_sock *inet = inet_sk(sk);
+       struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
+       struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
-          Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
+          Note that any socket with inet->num != 0 MUST be bound in
           binding cache, even if it is closed.
         */
-       bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
+       bhead = &tcp_hashinfo.bhash[inet_bhashfn(inet->num, tcp_hashinfo.bhash_size)];
        spin_lock(&bhead->lock);
-       tw->tw_tb = tcp_sk(sk)->bind_hash;
-       BUG_TRAP(tcp_sk(sk)->bind_hash);
+       tw->tw_tb = inet->bind_hash;
+       BUG_TRAP(inet->bind_hash);
        tw_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);
 
@@ -317,7 +316,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
                sock_prot_dec_use(sk->sk_prot);
 
        /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
-       tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
+       tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
        atomic_inc(&tw->tw_refcnt);
 
        write_unlock(&ehead->lock);
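Step 3 relies on the ehash table being allocated with twice ehash_size buckets: live connections hash into [0, ehash_size) and their TIME_WAIT shadows into the upper half, so the single write_lock taken on ehead covers both chains for a given hash value. A hypothetical helper (the name is for illustration only) makes the addressing explicit:

static inline struct inet_ehash_bucket *
inet_ehash_tw_twin(struct inet_hashinfo *hinfo, unsigned int hashent)
{
	/* TIME_WAIT twin of the established bucket at 'hashent'. */
	return &hinfo->ehash[hashent + hinfo->ehash_size];
}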
@@ -684,7 +683,7 @@ out:
  * Actually, we could save lots of memory writes here. tp of listening
  * socket contains all necessary default parameters.
  */
-struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
+struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
 {
        /* allocate the newsk from the same slab of the master sock,
         * if not, at sk_free time we'll try to free it from the wrong
@@ -692,6 +691,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
        struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
 
        if(newsk != NULL) {
+               struct inet_request_sock *ireq = inet_rsk(req);
+               struct tcp_request_sock *treq = tcp_rsk(req);
+               struct inet_sock *newinet = inet_sk(newsk);
                struct tcp_sock *newtp;
                struct sk_filter *filter;
 
@@ -700,15 +702,16 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 
                /* SANITY */
                sk_node_init(&newsk->sk_node);
-               tcp_sk(newsk)->bind_hash = NULL;
+               newinet->bind_hash = NULL;
 
                /* Clone the TCP header template */
-               inet_sk(newsk)->dport = req->rmt_port;
+               newinet->dport = ireq->rmt_port;
 
                sock_lock_init(newsk);
                bh_lock_sock(newsk);
 
                rwlock_init(&newsk->sk_dst_lock);
+               newsk->sk_dst_cache = NULL;
                atomic_set(&newsk->sk_rmem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                atomic_set(&newsk->sk_wmem_alloc, 0);
@@ -739,14 +742,14 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                /* Now setup tcp_sock */
                newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
-               newtp->rcv_nxt = req->rcv_isn + 1;
-               newtp->snd_nxt = req->snt_isn + 1;
-               newtp->snd_una = req->snt_isn + 1;
-               newtp->snd_sml = req->snt_isn + 1;
+               newtp->rcv_nxt = treq->rcv_isn + 1;
+               newtp->snd_nxt = treq->snt_isn + 1;
+               newtp->snd_una = treq->snt_isn + 1;
+               newtp->snd_sml = treq->snt_isn + 1;
 
                tcp_prequeue_init(newtp);
 
-               tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
+               tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
 
                newtp->retransmits = 0;
                newtp->backoff = 0;
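The "+ 1" on every ISN reflects the SYN consuming one unit of sequence space (RFC 793): the first data byte expected from the peer is rcv_isn + 1 and the first byte the child sends is snt_isn + 1. tcp_init_wl() then seeds the window-update bookkeeping; a sketch of the helper as it reads around this point in the tree (the ack argument is unused by then):

static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	/* snd_wl1 records the peer sequence number of the last
	 * segment used to update the send window. */
	tp->snd_wl1 = seq;
}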
@@ -772,13 +775,15 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;
 
+               newtp->ca_ops = &tcp_reno;
+
                tcp_set_ca_state(newtp, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
-               newtp->rcv_wup = req->rcv_isn + 1;
-               newtp->write_seq = req->snt_isn + 1;
+               newtp->rcv_wup = treq->rcv_isn + 1;
+               newtp->write_seq = treq->snt_isn + 1;
                newtp->pushed_seq = newtp->write_seq;
-               newtp->copied_seq = req->rcv_isn + 1;
+               newtp->copied_seq = treq->rcv_isn + 1;
 
                newtp->rx_opt.saw_tstamp = 0;
 
@@ -788,18 +793,28 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                newtp->probes_out = 0;
                newtp->rx_opt.num_sacks = 0;
                newtp->urg_data = 0;
-               newtp->listen_opt = NULL;
-               newtp->accept_queue = newtp->accept_queue_tail = NULL;
-               /* Deinitialize syn_wait_lock to trap illegal accesses. */
-               memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
+               /* Deinitialize accept_queue to trap illegal accesses. */
+               memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
 
                /* Back to base struct sock members. */
                newsk->sk_err = 0;
                newsk->sk_priority = 0;
                atomic_set(&newsk->sk_refcnt, 2);
-#ifdef INET_REFCNT_DEBUG
-               atomic_inc(&inet_sock_nr);
-#endif
+
+               /*
+                * Increment the counter in the same struct proto as the master
+                * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+                * is the same as sk->sk_prot->socks, as this field was copied
+                * with memcpy), same rationale as the first comment in this
+                * function.
+                *
+                * This _changes_ the previous behaviour, where
+                * tcp_create_openreq_child always incremented the
+                * equivalent of tcp_prot->socks (inet_sock_nr), so this has
+                * to be taken into account in all callers. -acme
+                */
+               sk_refcnt_debug_inc(newsk);
+
                atomic_inc(&tcp_sockets_allocated);
 
                if (sock_flag(newsk, SOCK_KEEPOPEN))
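The comment above documents a behavioural change: the debug counter is now per struct proto rather than the INET-wide inet_sock_nr. A sketch of the helper, assuming the generic form it takes in include/net/sock.h:

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	/* sk->sk_prot was copied from the listener, so for a TCP child
	 * this bumps tcp_prot.socks rather than a protocol-wide global. */
	atomic_inc(&sk->sk_prot->socks);
}
#else
#define sk_refcnt_debug_inc(sk) do { } while (0)
#endif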
@@ -808,18 +823,18 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                newsk->sk_socket = NULL;
                newsk->sk_sleep = NULL;
 
-               newtp->rx_opt.tstamp_ok = req->tstamp_ok;
-               if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
+               newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
+               if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                        if (sysctl_tcp_fack)
                                newtp->rx_opt.sack_ok |= 2;
                }
                newtp->window_clamp = req->window_clamp;
                newtp->rcv_ssthresh = req->rcv_wnd;
                newtp->rcv_wnd = req->rcv_wnd;
-               newtp->rx_opt.wscale_ok = req->wscale_ok;
+               newtp->rx_opt.wscale_ok = ireq->wscale_ok;
                if (newtp->rx_opt.wscale_ok) {
-                       newtp->rx_opt.snd_wscale = req->snd_wscale;
-                       newtp->rx_opt.rcv_wscale = req->rcv_wscale;
+                       newtp->rx_opt.snd_wscale = ireq->snd_wscale;
+                       newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
                } else {
                        newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                        newtp->window_clamp = min(newtp->window_clamp, 65535U);
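The 65535U clamp in the else branch exists because the TCP header's window field is only 16 bits; when window scaling was negotiated, the value actually put on the wire is right-shifted by the agreed scale instead. A hypothetical helper (name and signature are illustrative) capturing the relationship:

static inline u16 advertised_window(u32 rcv_wnd, u8 rcv_wscale)
{
	/* The peer reconstructs the window as (field << rcv_wscale),
	 * so without scaling (rcv_wscale == 0) the ceiling is 64K - 1. */
	return (u16)(rcv_wnd >> rcv_wscale);
}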
@@ -842,8 +857,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                if (newtp->ecn_flags&TCP_ECN_OK)
                        sock_set_flag(newsk, SOCK_NO_LARGESEND);
 
-               tcp_ca_init(newtp);
-
                TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
        }
        return newsk;
@@ -851,12 +864,12 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 
 /* 
  *     Process an incoming packet for SYN_RECV sockets represented
- *     as an open_request.
+ *     as a request_sock.
  */
 
 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
-                          struct open_request *req,
-                          struct open_request **prev)
+                          struct request_sock *req,
+                          struct request_sock **prev)
 {
        struct tcphdr *th = skb->h.th;
        struct tcp_sock *tp = tcp_sk(sk);
@@ -881,7 +894,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
        }
 
        /* Check for pure retransmitted SYN. */
-       if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
+       if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
@@ -901,7 +914,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 */
-               req->class->rtx_syn_ack(sk, req, NULL);
+               req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                return NULL;
        }
 
@@ -959,7 +972,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
         * Invalid ACK: reset will be sent by listening socket
         */
        if ((flg & TCP_FLAG_ACK) &&
-           (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
+           (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
                return sk;
 
        /* Also, it would not be a bad idea to check rcv_tsecr, which
@@ -970,10 +983,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
        /* RFC793: "first check sequence number". */
 
        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                                         req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
+                                         tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST))
-                       req->class->send_ack(skb, req);
+                       req->rsk_ops->send_ack(skb, req);
                if (paws_reject)
                        NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
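The acceptance test above checks for overlap with the window offered in the SYN-ACK, [rcv_isn + 1, rcv_isn + 1 + rcv_wnd). tcp_in_window() is defined earlier in this same file; as it reads in this era:

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return seq == e_win && seq == end_seq;
}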
@@ -981,12 +994,12 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
        /* In sequence, PAWS is OK. */
 
-       if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
+       if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
                        req->ts_recent = tmp_opt.rcv_tsval;
 
-               if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
+               if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                        /* Truncate SYN, it is out of window starting
-                          at req->rcv_isn+1. */
+                          at tcp_rsk(req)->rcv_isn + 1. */
                        flg &= ~TCP_FLAG_SYN;
                }
 
@@ -1003,8 +1016,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                        return NULL;
 
                /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-               if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
-                       req->acked = 1;
+               if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+                       inet_rsk(req)->acked = 1;
                        return NULL;
                }
 
@@ -1026,14 +1039,14 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
        listen_overflow:
                if (!sysctl_tcp_abort_on_overflow) {
-                       req->acked = 1;
+                       inet_rsk(req)->acked = 1;
                        return NULL;
                }
 
        embryonic_reset:
                NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
                if (!(flg & TCP_FLAG_RST))
-                       req->class->send_reset(skb);
+                       req->rsk_ops->send_reset(skb);
 
                tcp_synq_drop(sk, req, prev);
                return NULL;