[NET]: Introduce inet_connection_sock
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f42a284164b794aefec97bb296a115bc5dbc231a..56823704eb7d2f3e4a9df357a3a2d0a51a6eec90 100644
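
This file's slice of the change moves TCP's minisock and TIME-WAIT
machinery onto the new protocol-independent types: struct
inet_timewait_sock replaces struct tcp_tw_bucket, and struct
inet_connection_sock absorbs the connection-oriented fields of struct
tcp_sock. A sketch of the new struct, limited to the members this file
touches (member order, exact types, and the icsk_inet/request_sock_queue
details are assumptions; see include/net/inet_connection_sock.h for the
real definition):

	struct inet_connection_sock {
		struct inet_sock	  icsk_inet;	/* assumed: must be first */
		struct request_sock_queue icsk_accept_queue;
		struct inet_bind_bucket	  *icsk_bind_hash;
		__u32			  icsk_rto;
		__u8			  icsk_retransmits;
		__u8			  icsk_backoff;
		struct {
			__u16 last_seg_size;	/* ex tp->ack.last_seg_size */
			/* ... more delayed-ACK state ... */
		} icsk_ack;
	};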
@@ -41,7 +41,7 @@ int sysctl_tcp_max_tw_buckets = NR_FILE*2;
 int sysctl_tcp_syncookies = SYNC_INIT; 
 int sysctl_tcp_abort_on_overflow;
 
-static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
+static void tcp_tw_schedule(struct inet_timewait_sock *tw, int timeo);
 
 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
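
The hunk cuts off at the opening brace; the body of tcp_in_window() is
untouched by this patch. For reference, it amounts to the standard
sequence-space overlap test (paraphrased, not part of the diff):

	/* does [seq, end_seq] intersect the window [s_win, e_win)? */
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return seq == e_win && seq == end_seq;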
@@ -56,43 +56,6 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 
 int tcp_tw_count;
 
-
-/* Must be called with locally disabled BHs. */
-static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
-{
-       struct tcp_ehash_bucket *ehead;
-       struct tcp_bind_hashbucket *bhead;
-       struct tcp_bind_bucket *tb;
-
-       /* Unlink from established hashes. */
-       ehead = &tcp_ehash[tw->tw_hashent];
-       write_lock(&ehead->lock);
-       if (hlist_unhashed(&tw->tw_node)) {
-               write_unlock(&ehead->lock);
-               return;
-       }
-       __hlist_del(&tw->tw_node);
-       sk_node_init(&tw->tw_node);
-       write_unlock(&ehead->lock);
-
-       /* Disassociate with bind bucket. */
-       bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
-       spin_lock(&bhead->lock);
-       tb = tw->tw_tb;
-       __hlist_del(&tw->tw_bind_node);
-       tw->tw_tb = NULL;
-       tcp_bucket_destroy(tb);
-       spin_unlock(&bhead->lock);
-
-#ifdef INET_REFCNT_DEBUG
-       if (atomic_read(&tw->tw_refcnt) != 1) {
-               printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
-                      atomic_read(&tw->tw_refcnt));
-       }
-#endif
-       tcp_tw_put(tw);
-}
-
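
The deleted tcp_timewait_kill() is not lost: it reappears, generalized
over a struct inet_hashinfo instead of the tcp_ehash/tcp_bhash globals,
as __inet_twsk_kill(), which the hunks below call with &tcp_hashinfo.
Assumed prototype, inferred from those call sites:

	extern void __inet_twsk_kill(struct inet_timewait_sock *tw,
				     struct inet_hashinfo *hashinfo);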
 /* 
  * * Main purpose of TIME-WAIT state is to close connection gracefully,
  *   when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
@@ -122,19 +85,20 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
  * to avoid misread sequence numbers, states etc.  --ANK
  */
 enum tcp_tw_status
-tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
-                          struct tcphdr *th, unsigned len)
+tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
+                          const struct tcphdr *th)
 {
+       struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        struct tcp_options_received tmp_opt;
        int paws_reject = 0;
 
        tmp_opt.saw_tstamp = 0;
-       if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
+       if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(skb, &tmp_opt, 0);
 
                if (tmp_opt.saw_tstamp) {
-                       tmp_opt.ts_recent          = tw->tw_ts_recent;
-                       tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
+                       tmp_opt.ts_recent       = tcptw->tw_ts_recent;
+                       tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_check(&tmp_opt, th->rst);
                }
        }
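
The tcp_twsk() cast above is safe because the TCP-specific timewait sock
embeds the generic struct inet_timewait_sock as its first member; only
TCP-only state stays in the derived type. A sketch, with the field list
reconstructed from the uses in this file (exact types are an assumption):

	struct tcp_timewait_sock {
		struct inet_timewait_sock tw_sk;	/* must be first */
		u32	tw_rcv_nxt;
		u32	tw_snd_nxt;
		u32	tw_rcv_wnd;
		u32	tw_ts_recent;
		long	tw_ts_recent_stamp;
	};

	static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
	{
		return (struct tcp_timewait_sock *)sk;
	}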
@@ -145,20 +109,20 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                                  tw->tw_rcv_nxt,
-                                  tw->tw_rcv_nxt + tw->tw_rcv_wnd))
+                                  tcptw->tw_rcv_nxt,
+                                  tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return TCP_TW_ACK;
 
                if (th->rst)
                        goto kill;
 
-               if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
+               if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        goto kill_with_rst;
 
                /* Dup ACK? */
-               if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
+               if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
-                       tcp_tw_put(tw);
+                       inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }
 
@@ -166,19 +130,19 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
                 * reset.
                 */
                if (!th->fin ||
-                   TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
+                   TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
 kill_with_rst:
                        tcp_tw_deschedule(tw);
-                       tcp_tw_put(tw);
+                       inet_twsk_put(tw);
                        return TCP_TW_RST;
                }
 
                /* FIN arrived, enter true time-wait state. */
-               tw->tw_substate = TCP_TIME_WAIT;
-               tw->tw_rcv_nxt  = TCP_SKB_CB(skb)->end_seq;
+               tw->tw_substate   = TCP_TIME_WAIT;
+               tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
-                       tw->tw_ts_recent_stamp  = xtime.tv_sec;
-                       tw->tw_ts_recent        = tmp_opt.rcv_tsval;
+                       tcptw->tw_ts_recent_stamp = xtime.tv_sec;
+                       tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }
 
                /* I am shamed, but failed to make it more elegant.
@@ -187,7 +151,7 @@ kill_with_rst:
                 * do not understand recycling in any case, it is not
                 * a big problem in practice. --ANK */
                if (tw->tw_family == AF_INET &&
-                   sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
+                   sysctl_tcp_tw_recycle && tcptw->tw_ts_recent_stamp &&
                    tcp_v4_tw_remember_stamp(tw))
                        tcp_tw_schedule(tw, tw->tw_timeout);
                else
@@ -213,7 +177,7 @@ kill_with_rst:
         */
 
        if (!paws_reject &&
-           (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
+           (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In window segment, it may be only reset or bare ack. */
 
@@ -225,18 +189,18 @@ kill_with_rst:
                        if (sysctl_tcp_rfc1337 == 0) {
 kill:
                                tcp_tw_deschedule(tw);
-                               tcp_tw_put(tw);
+                               inet_twsk_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                }
                tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
 
                if (tmp_opt.saw_tstamp) {
-                       tw->tw_ts_recent        = tmp_opt.rcv_tsval;
-                       tw->tw_ts_recent_stamp  = xtime.tv_sec;
+                       tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
+                       tcptw->tw_ts_recent_stamp = xtime.tv_sec;
                }
 
-               tcp_tw_put(tw);
+               inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }
 
@@ -258,9 +222,10 @@ kill:
         */
 
        if (th->syn && !th->rst && !th->ack && !paws_reject &&
-           (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
-            (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
-               u32 isn = tw->tw_snd_nxt + 65535 + 2;
+           (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
+            (tmp_opt.saw_tstamp &&
+             (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
+               u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->when = isn;
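
Worked example for the ISN choice above: the new connection's initial
sequence number is pushed past anything the dead incarnation could have
sent, i.e. past snd_nxt plus the largest unscaled window (65535), plus 2
to clear the SYN and FIN sequence slots. Reading when == 0 as "no
recycled ISN" on the listener side is an assumption drawn from the
isn == 0 special case:

	/* old snd_nxt = 1000  =>  isn = 1000 + 65535 + 2 = 66537;
	 * strictly after() the old sequence space even with a full
	 * 64K window still in flight. The value travels to the
	 * listener in TCP_SKB_CB(skb)->when. */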
@@ -285,100 +250,49 @@ kill:
                 */
                return TCP_TW_ACK;
        }
-       tcp_tw_put(tw);
+       inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
 }
 
-/* Enter the time wait state.  This is called with locally disabled BH.
- * Essentially we whip up a timewait bucket, copy the
- * relevant info into it from the SK, and mess with hash chains
- * and list linkage.
- */
-static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
-{
-       struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
-       struct tcp_bind_hashbucket *bhead;
-
-       /* Step 1: Put TW into bind hash. Original socket stays there too.
-          Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
-          binding cache, even if it is closed.
-        */
-       bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
-       spin_lock(&bhead->lock);
-       tw->tw_tb = tcp_sk(sk)->bind_hash;
-       BUG_TRAP(tcp_sk(sk)->bind_hash);
-       tw_add_bind_node(tw, &tw->tw_tb->owners);
-       spin_unlock(&bhead->lock);
-
-       write_lock(&ehead->lock);
-
-       /* Step 2: Remove SK from established hash. */
-       if (__sk_del_node_init(sk))
-               sock_prot_dec_use(sk->sk_prot);
-
-       /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
-       tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
-       atomic_inc(&tw->tw_refcnt);
-
-       write_unlock(&ehead->lock);
-}
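
As with the kill path, the hashdance survives as a generic helper keyed
by a hashinfo; the replacement call below passes &tcp_hashinfo, and the
steps (bind-hash link, unhash sk, hash tw, take a reference) match the
deleted block. Assumed prototype, inferred from the call site:

	extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
					  struct sock *sk,
					  struct inet_hashinfo *hashinfo);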
-
 /* 
  * Move a socket to time-wait or dead fin-wait-2 state.
  */ 
 void tcp_time_wait(struct sock *sk, int state, int timeo)
 {
-       struct tcp_tw_bucket *tw = NULL;
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_timewait_sock *tw = NULL;
+       const struct tcp_sock *tp = tcp_sk(sk);
        int recycle_ok = 0;
 
        if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
                recycle_ok = tp->af_specific->remember_stamp(sk);
 
        if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
-               tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
-
-       if(tw != NULL) {
-               struct inet_sock *inet = inet_sk(sk);
-               int rto = (tp->rto<<2) - (tp->rto>>1);
-
-               /* Give us an identity. */
-               tw->tw_daddr            = inet->daddr;
-               tw->tw_rcv_saddr        = inet->rcv_saddr;
-               tw->tw_bound_dev_if     = sk->sk_bound_dev_if;
-               tw->tw_num              = inet->num;
-               tw->tw_state            = TCP_TIME_WAIT;
-               tw->tw_substate         = state;
-               tw->tw_sport            = inet->sport;
-               tw->tw_dport            = inet->dport;
-               tw->tw_family           = sk->sk_family;
-               tw->tw_reuse            = sk->sk_reuse;
-               tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
-               atomic_set(&tw->tw_refcnt, 1);
+               tw = inet_twsk_alloc(sk, state);
+
+       if (tw != NULL) {
+               struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+               const struct inet_connection_sock *icsk = inet_csk(sk);
+               const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
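
The rto constant is the classical 3.5*RTO lower bound for how long the
socket must linger, now computed from the icsk field; the shifts are
plain fixed-point arithmetic:

	(icsk_rto << 2) - (icsk_rto >> 1)  ==  4*RTO - RTO/2  ==  3.5*RTO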
 
-               tw->tw_hashent          = sk->sk_hashent;
-               tw->tw_rcv_nxt          = tp->rcv_nxt;
-               tw->tw_snd_nxt          = tp->snd_nxt;
-               tw->tw_rcv_wnd          = tcp_receive_window(tp);
-               tw->tw_ts_recent        = tp->rx_opt.ts_recent;
-               tw->tw_ts_recent_stamp  = tp->rx_opt.ts_recent_stamp;
-               tw_dead_node_init(tw);
+               tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
+               tcptw->tw_rcv_nxt       = tp->rcv_nxt;
+               tcptw->tw_snd_nxt       = tp->snd_nxt;
+               tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
+               tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
+               tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
+                       struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
 
-                       ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
-                       ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
-                       tw->tw_v6_ipv6only = np->ipv6only;
-               } else {
-                       memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
-                       memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
-                       tw->tw_v6_ipv6only = 0;
+                       ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
+                       ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
+                       tw->tw_ipv6only = np->ipv6only;
                }
 #endif
                /* Linkage updates. */
-               __tcp_tw_hashdance(sk, tw);
+               __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 
                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
@@ -393,7 +307,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                }
 
                tcp_tw_schedule(tw, timeo);
-               tcp_tw_put(tw);
+               inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
@@ -428,7 +342,7 @@ static u32 twkill_thread_slots;
 /* Returns non-zero if quota exceeded.  */
 static int tcp_do_twkill_work(int slot, unsigned int quota)
 {
-       struct tcp_tw_bucket *tw;
+       struct inet_timewait_sock *tw;
        struct hlist_node *node;
        unsigned int killed;
        int ret;
@@ -442,11 +356,11 @@ static int tcp_do_twkill_work(int slot, unsigned int quota)
        killed = 0;
        ret = 0;
 rescan:
-       tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
-               __tw_del_dead_node(tw);
+       inet_twsk_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
+               __inet_twsk_del_dead_node(tw);
                spin_unlock(&tw_death_lock);
-               tcp_timewait_kill(tw);
-               tcp_tw_put(tw);
+               __inet_twsk_kill(tw, &tcp_hashinfo);
+               inet_twsk_put(tw);
                killed++;
                spin_lock(&tw_death_lock);
                if (killed > quota) {
@@ -532,16 +446,16 @@ static void twkill_work(void *dummy)
  */
 
 /* This is for handling early-kills of TIME_WAIT sockets. */
-void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
+void tcp_tw_deschedule(struct inet_timewait_sock *tw)
 {
        spin_lock(&tw_death_lock);
-       if (tw_del_dead_node(tw)) {
-               tcp_tw_put(tw);
+       if (inet_twsk_del_dead_node(tw)) {
+               inet_twsk_put(tw);
                if (--tcp_tw_count == 0)
                        del_timer(&tcp_tw_timer);
        }
        spin_unlock(&tw_death_lock);
-       tcp_timewait_kill(tw);
+       __inet_twsk_kill(tw, &tcp_hashinfo);
 }
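
inet_twsk_del_dead_node() keeps the contract of the old
tw_del_dead_node(): unlink from a death row and report whether the
socket was actually scheduled, so exactly one reference is dropped per
successful unlink. A sketch of the assumed inline, mirroring the helper
it replaces:

	static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
	{
		int ret = 0;

		if (!hlist_unhashed(&tw->tw_death_node)) {
			__inet_twsk_del_dead_node(tw);
			ret = 1;
		}
		return ret;
	}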
 
 /* Short-time timewait calendar */
@@ -553,7 +467,7 @@ static struct timer_list tcp_twcal_timer =
                TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
 
-static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
+static void tcp_tw_schedule(struct inet_timewait_sock *tw, const int timeo)
 {
        struct hlist_head *list;
        int slot;
@@ -587,7 +501,7 @@ static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
        spin_lock(&tw_death_lock);
 
        /* Unlink it, if it was scheduled */
-       if (tw_del_dead_node(tw))
+       if (inet_twsk_del_dead_node(tw))
                tcp_tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);
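
The reference counting around the death rows, as this file appears to
use it:

	/* one reference is held by the established-hash linkage (taken in
	 * the hashdance), one by a death row while the socket is
	 * scheduled, and one per in-flight lookup (dropped through
	 * inet_twsk_put()). Hence the branch above: rescheduling an
	 * already-queued socket must not take a second timer reference. */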
@@ -645,13 +559,13 @@ void tcp_twcal_tick(unsigned long dummy)
        for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *node, *safe;
-                       struct tcp_tw_bucket *tw;
+                       struct inet_timewait_sock *tw;
 
-                       tw_for_each_inmate_safe(tw, node, safe,
-                                          &tcp_twcal_row[slot]) {
-                               __tw_del_dead_node(tw);
-                               tcp_timewait_kill(tw);
-                               tcp_tw_put(tw);
+                       inet_twsk_for_each_inmate_safe(tw, node, safe,
+                                                      &tcp_twcal_row[slot]) {
+                               __inet_twsk_del_dead_node(tw);
+                               __inet_twsk_kill(tw, &tcp_hashinfo);
+                               inet_twsk_put(tw);
                                killed++;
                        }
                } else {
@@ -686,75 +600,37 @@ out:
  */
 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
 {
-       /* allocate the newsk from the same slab of the master sock,
-        * if not, at sk_free time we'll try to free it from the wrong
-        * slabcache (i.e. is it TCPv4 or v6?), this is handled thru sk->sk_prot -acme */
-       struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
+       struct sock *newsk = sk_clone(sk, GFP_ATOMIC);
 
-       if(newsk != NULL) {
+       if (newsk != NULL) {
                struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
+               struct inet_sock *newinet = inet_sk(newsk);
+               struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct tcp_sock *newtp;
-               struct sk_filter *filter;
 
-               memcpy(newsk, sk, sizeof(struct tcp_sock));
                newsk->sk_state = TCP_SYN_RECV;
-
-               /* SANITY */
-               sk_node_init(&newsk->sk_node);
-               tcp_sk(newsk)->bind_hash = NULL;
+               newicsk->icsk_bind_hash = NULL;
 
                /* Clone the TCP header template */
-               inet_sk(newsk)->dport = ireq->rmt_port;
-
-               sock_lock_init(newsk);
-               bh_lock_sock(newsk);
-
-               rwlock_init(&newsk->sk_dst_lock);
-               atomic_set(&newsk->sk_rmem_alloc, 0);
-               skb_queue_head_init(&newsk->sk_receive_queue);
-               atomic_set(&newsk->sk_wmem_alloc, 0);
-               skb_queue_head_init(&newsk->sk_write_queue);
-               atomic_set(&newsk->sk_omem_alloc, 0);
-               newsk->sk_wmem_queued = 0;
-               newsk->sk_forward_alloc = 0;
-
-               sock_reset_flag(newsk, SOCK_DONE);
-               newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
-               newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
-               newsk->sk_send_head = NULL;
-               rwlock_init(&newsk->sk_callback_lock);
-               skb_queue_head_init(&newsk->sk_error_queue);
+               newinet->dport = ireq->rmt_port;
                newsk->sk_write_space = sk_stream_write_space;
 
-               if ((filter = newsk->sk_filter) != NULL)
-                       sk_filter_charge(newsk, filter);
-
-               if (unlikely(xfrm_sk_clone_policy(newsk))) {
-                       /* It is still raw copy of parent, so invalidate
-                        * destructor and make plain sk_free() */
-                       newsk->sk_destruct = NULL;
-                       sk_free(newsk);
-                       return NULL;
-               }
-
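
sk_clone() (net/core/sock.c) now centralizes everything the deleted
block did by hand, which is why so much initialization simply vanishes
here. A condensed sketch of its effect, reconstructed from the removed
lines rather than from sk_clone() itself:

	newsk = sk_clone(sk, GFP_ATOMIC);
	/* on success:
	 *   - copy of the parent, sized via sk->sk_prot, so a v4/v6 child
	 *     is later freed from the correct slab cache
	 *   - sk_node, locks and the receive/write/error queues freshly
	 *     initialized; sk_socket/sk_sleep cleared (orphan)
	 *   - sk_refcnt set to 2: one for the caller, one for the coming
	 *     hash insertion
	 *   - an inherited socket filter charged via sk_filter_charge()
	 *   - xfrm policy cloned, with NULL returned on failure
	 */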
                /* Now setup tcp_sock */
                newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
                newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->snd_nxt = treq->snt_isn + 1;
-               newtp->snd_una = treq->snt_isn + 1;
-               newtp->snd_sml = treq->snt_isn + 1;
+               newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
 
                tcp_prequeue_init(newtp);
 
                tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
 
-               newtp->retransmits = 0;
-               newtp->backoff = 0;
+               newicsk->icsk_retransmits = 0;
+               newicsk->icsk_backoff = 0;
                newtp->srtt = 0;
                newtp->mdev = TCP_TIMEOUT_INIT;
-               newtp->rto = TCP_TIMEOUT_INIT;
+               newicsk->icsk_rto = TCP_TIMEOUT_INIT;
 
                newtp->packets_out = 0;
                newtp->left_out = 0;
@@ -793,22 +669,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->rx_opt.num_sacks = 0;
                newtp->urg_data = 0;
                /* Deinitialize accept_queue to trap illegal accesses. */
-               memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
-
-               /* Back to base struct sock members. */
-               newsk->sk_err = 0;
-               newsk->sk_priority = 0;
-               atomic_set(&newsk->sk_refcnt, 2);
-#ifdef INET_REFCNT_DEBUG
-               atomic_inc(&inet_sock_nr);
-#endif
-               atomic_inc(&tcp_sockets_allocated);
+               memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
 
                if (sock_flag(newsk, SOCK_KEEPOPEN))
-                       tcp_reset_keepalive_timer(newsk,
-                                                 keepalive_time_when(newtp));
-               newsk->sk_socket = NULL;
-               newsk->sk_sleep = NULL;
+                       inet_csk_reset_keepalive_timer(newsk,
+                                                      keepalive_time_when(newtp));
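
keepalive_time_when() picks the per-socket keepalive interval with a
sysctl fallback; assumed definition (include/net/tcp.h):

	#define keepalive_time_when(tp) \
		((tp)->keepalive_time ? : sysctl_tcp_keepalive_time)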
 
                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
                if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
@@ -838,7 +703,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
                if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
-                       newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
+                       newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
                if (newtp->ecn_flags&TCP_ECN_OK)
@@ -1018,10 +883,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                if (child == NULL)
                        goto listen_overflow;
 
-               tcp_synq_unlink(tp, req, prev);
-               tcp_synq_removed(sk, req);
+               inet_csk_reqsk_queue_unlink(sk, req, prev);
+               inet_csk_reqsk_queue_removed(sk, req);
 
-               tcp_acceptq_queue(sk, req, child);
+               inet_csk_reqsk_queue_add(sk, req, child);
                return child;
 
        listen_overflow:
@@ -1035,7 +900,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_reset(skb);
 
-               tcp_synq_drop(sk, req, prev);
+               inet_csk_reqsk_queue_drop(sk, req, prev);
                return NULL;
 }
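
The request-sock queue calls in the two hunks above map one-for-one onto
the new inet_connection_sock API; note that they now take the listener
sk instead of the tcp_sock:

	tcp_synq_unlink(tp, req, prev)     -> inet_csk_reqsk_queue_unlink(sk, req, prev)
	tcp_synq_removed(sk, req)          -> inet_csk_reqsk_queue_removed(sk, req)
	tcp_acceptq_queue(sk, req, child)  -> inet_csk_reqsk_queue_add(sk, req, child)
	tcp_synq_drop(sk, req, prev)       -> inet_csk_reqsk_queue_drop(sk, req, prev)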
 