[NET]: Introduce inet_connection_sock
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e3f8ea1bfa9c01179f93094c534c2ea3f07bcd9f..6f0a7e30ceac45bc66ee479bf5ea7bcc80527ae9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -105,8 +105,9 @@ static __u16 tcp_advertise_mss(struct sock *sk)
 
 /* RFC2861. Reset CWND after an idle period longer than the RTO to the "restart window".
  * This is the first part of the cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        s32 delta = tcp_time_stamp - tp->lsndtime;
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;
@@ -116,7 +117,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
        tp->snd_ssthresh = tcp_current_ssthresh(tp);
        restart_cwnd = min(restart_cwnd, cwnd);
 
-       while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd)
+       while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        tp->snd_cwnd = max(cwnd, restart_cwnd);
        tp->snd_cwnd_stamp = tcp_time_stamp;
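
For context: per RFC 2861, the loop above halves the congestion window once for every RTO that elapsed while the connection was idle, but never below the restart window. A minimal standalone sketch of the decay (hypothetical helper, kernel types dropped):

    /* Illustrative sketch, not part of the patch: cwnd decay after idle. */
    static unsigned int cwnd_after_idle(unsigned int cwnd, unsigned int restart_cwnd,
                                        int idle, int rto)
    {
            int delta = idle;

            while ((delta -= rto) > 0 && cwnd > restart_cwnd)
                    cwnd >>= 1;     /* one halving per elapsed RTO */
            return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

So an 8-segment window idle for a bit over two RTOs restarts at max(8 >> 2, restart_cwnd).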
@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
 static inline void tcp_event_data_sent(struct tcp_sock *tp,
                                       struct sk_buff *skb, struct sock *sk)
 {
-       u32 now = tcp_time_stamp;
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       const u32 now = tcp_time_stamp;
 
-       if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto)
-               tcp_cwnd_restart(tp, __sk_dst_get(sk));
+       if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
+               tcp_cwnd_restart(sk, __sk_dst_get(sk));
 
        tp->lsndtime = now;
 
        /* If this is a reply sent within ato of the last received
         * packet, enter pingpong mode.
         */
-       if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato)
-               tp->ack.pingpong = 1;
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               icsk->icsk_ack.pingpong = 1;
 }
 
 static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       tcp_dec_quickack_mode(tp, pkts);
-       tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
+       tcp_dec_quickack_mode(sk, pkts);
+       inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
 /* Determine a window scaling and initial window to offer.
@@ -403,11 +403,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
                sk->sk_send_head = skb;
 }
 
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
+static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       if (skb->len <= tp->mss_cache ||
+       if (skb->len <= mss_now ||
            !(sk->sk_route_caps & NETIF_F_TSO)) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
@@ -417,10 +415,10 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
        } else {
                unsigned int factor;
 
-               factor = skb->len + (tp->mss_cache - 1);
-               factor /= tp->mss_cache;
+               factor = skb->len + (mss_now - 1);
+               factor /= mss_now;
                skb_shinfo(skb)->tso_segs = factor;
-               skb_shinfo(skb)->tso_size = tp->mss_cache;
+               skb_shinfo(skb)->tso_size = mss_now;
        }
 }
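
The factor computed above is just an integer ceiling division of the payload length by the MSS; e.g. a 4000-byte skb at mss_now = 1460 yields (4000 + 1459) / 1460 = 3 segments. A one-line restatement (hypothetical helper name):

    /* Illustrative only: segments = ceil(len / mss) in integer arithmetic. */
    static unsigned int tso_factor(unsigned int len, unsigned int mss)
    {
            return (len + mss - 1) / mss;
    }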
 
@@ -429,7 +427,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
  * packet to the list.  This won't be called frequently, I hope. 
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
+static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
@@ -492,8 +490,8 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
        }
 
        /* Fix up tso_factor for both original and new SKB.  */
-       tcp_set_skb_tso_segs(sk, skb);
-       tcp_set_skb_tso_segs(sk, buff);
+       tcp_set_skb_tso_segs(sk, skb, mss_now);
+       tcp_set_skb_tso_segs(sk, buff, mss_now);
 
        if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
                tp->lost_out += tcp_skb_pcount(skb);
@@ -507,7 +505,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff);
+       __skb_append(skb, buff, &sk->sk_write_queue);
 
        return 0;
 }
@@ -569,7 +567,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
         * factor and mss.
         */
        if (tcp_skb_pcount(skb) > 1)
-               tcp_set_skb_tso_segs(sk, skb);
+               tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
 
        return 0;
 }
@@ -698,7 +696,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
                if (tp->packets_out > tp->snd_cwnd_used)
                        tp->snd_cwnd_used = tp->packets_out;
 
-               if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
+               if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
                        tcp_cwnd_application_limited(sk);
        }
 }
@@ -734,12 +732,14 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
 /* This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
        int tso_segs = tcp_skb_pcount(skb);
 
-       if (!tso_segs) {
-               tcp_set_skb_tso_segs(sk, skb);
+       if (!tso_segs ||
+           (tso_segs > 1 &&
+            skb_shinfo(skb)->tso_size != mss_now)) {
+               tcp_set_skb_tso_segs(sk, skb, mss_now);
                tso_segs = tcp_skb_pcount(skb);
        }
        return tso_segs;
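
With mss_now threaded through, the test above also catches an skb whose cached tso_size predates an MSS change (say, after a PMTU update) and re-segments it rather than sending a stale factor. Restated as a predicate (illustrative, same fields as the patch):

    /* (Re)segment if never segmented, or segmented for a different MSS. */
    stale = !tso_segs ||
            (tso_segs > 1 && skb_shinfo(skb)->tso_size != mss_now);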
@@ -817,7 +817,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int cwnd_quota;
 
-       tcp_init_tso_segs(sk, skb);
+       tcp_init_tso_segs(sk, skb, cur_mss);
 
        if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
                return 0;
@@ -854,14 +854,15 @@ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
  * know that all the data is in scatter-gather pages, and that the
  * packet has never been sent out before (and thus is not cloned).
  */
-static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
+static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
 {
        struct sk_buff *buff;
        int nlen = skb->len - len;
        u16 flags;
 
        /* All of a TSO frame must be composed of paged data.  */
-       BUG_ON(skb->len != skb->data_len);
+       if (skb->len != skb->data_len)
+               return tcp_fragment(sk, skb, len, mss_now);
 
        buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
        if (unlikely(buff == NULL))
@@ -887,12 +888,12 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len)
        skb_split(skb, buff, len);
 
        /* Fix up tso_factor for both original and new SKB.  */
-       tcp_set_skb_tso_segs(sk, skb);
-       tcp_set_skb_tso_segs(sk, buff);
+       tcp_set_skb_tso_segs(sk, skb, mss_now);
+       tcp_set_skb_tso_segs(sk, buff, mss_now);
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff);
+       __skb_append(skb, buff, &sk->sk_write_queue);
 
        return 0;
 }
@@ -924,10 +925,6 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
 
        limit = min(send_win, cong_win);
 
-       /* If sk_send_head can be sent fully now, just do it.  */
-       if (skb->len <= limit)
-               return 0;
-
        if (sysctl_tcp_tso_win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
@@ -972,19 +969,20 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
        if (unlikely(sk->sk_state == TCP_CLOSE))
                return 0;
 
-       skb = sk->sk_send_head;
-       if (unlikely(!skb))
-               return 0;
-
-       tso_segs = tcp_init_tso_segs(sk, skb);
-       cwnd_quota = tcp_cwnd_test(tp, skb);
-       if (unlikely(!cwnd_quota))
-               goto out;
-
        sent_pkts = 0;
-       while (likely(tcp_snd_wnd_test(tp, skb, mss_now))) {
+       while ((skb = sk->sk_send_head)) {
+               unsigned int limit;
+
+               tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
                BUG_ON(!tso_segs);
 
+               cwnd_quota = tcp_cwnd_test(tp, skb);
+               if (!cwnd_quota)
+                       break;
+
+               if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+                       break;
+
                if (tso_segs == 1) {
                        if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                                     (tcp_skb_is_last(sk, skb) ?
@@ -995,9 +993,10 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                                break;
                }
 
+               limit = mss_now;
                if (tso_segs > 1) {
-                       u32 limit = tcp_window_allows(tp, skb,
-                                                     mss_now, cwnd_quota);
+                       limit = tcp_window_allows(tp, skb,
+                                                 mss_now, cwnd_quota);
 
                        if (skb->len < limit) {
                                unsigned int trim = skb->len % mss_now;
@@ -1005,15 +1004,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                                if (trim)
                                        limit = skb->len - trim;
                        }
-                       if (skb->len > limit) {
-                               if (tso_fragment(sk, skb, limit))
-                                       break;
-                       }
-               } else if (unlikely(skb->len > mss_now)) {
-                       if (unlikely(tcp_fragment(sk, skb,  mss_now)))
-                               break;
                }
 
+               if (skb->len > limit &&
+                   unlikely(tso_fragment(sk, skb, limit, mss_now)))
+                       break;
+
                TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
                if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
@@ -1026,27 +1022,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
                tcp_minshall_update(tp, mss_now, skb);
                sent_pkts++;
-
-               /* Do not optimize this to use tso_segs. If we chopped up
-                * the packet above, tso_segs will no longer be valid.
-                */
-               cwnd_quota -= tcp_skb_pcount(skb);
-
-               BUG_ON(cwnd_quota < 0);
-               if (!cwnd_quota)
-                       break;
-
-               skb = sk->sk_send_head;
-               if (!skb)
-                       break;
-               tso_segs = tcp_init_tso_segs(sk, skb);
        }
 
        if (likely(sent_pkts)) {
                tcp_cwnd_validate(sk, tp);
                return 0;
        }
-out:
        return !tp->packets_out && sk->sk_send_head;
 }
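
The rewrite above folds the one-shot setup into the loop, so tso_segs and the cwnd/window checks are recomputed for every skb, including one just produced by fragmentation. The control flow, schematically (error paths and stats trimmed; names as in the patch):

    while ((skb = sk->sk_send_head)) {
            tso_segs = tcp_init_tso_segs(sk, skb, mss_now); /* fresh each pass */
            if (!tcp_cwnd_test(tp, skb))
                    break;                  /* out of congestion quota */
            if (!tcp_snd_wnd_test(tp, skb, mss_now))
                    break;                  /* out of receiver window */
            /* clamp to limit, fragment if needed, transmit, advance */
    }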
 
@@ -1076,15 +1057,18 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 
        BUG_ON(!skb || skb->len < mss_now);
 
-       tso_segs = tcp_init_tso_segs(sk, skb);
+       tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
        cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
 
        if (likely(cwnd_quota)) {
+               unsigned int limit;
+
                BUG_ON(!tso_segs);
 
+               limit = mss_now;
                if (tso_segs > 1) {
-                       u32 limit = tcp_window_allows(tp, skb,
-                                                     mss_now, cwnd_quota);
+                       limit = tcp_window_allows(tp, skb,
+                                                 mss_now, cwnd_quota);
 
                        if (skb->len < limit) {
                                unsigned int trim = skb->len % mss_now;
@@ -1092,15 +1076,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
                                if (trim)
                                        limit = skb->len - trim;
                        }
-                       if (skb->len > limit) {
-                               if (unlikely(tso_fragment(sk, skb, limit)))
-                                       return;
-                       }
-               } else if (unlikely(skb->len > mss_now)) {
-                       if (unlikely(tcp_fragment(sk, skb, mss_now)))
-                               return;
                }
 
+               if (skb->len > limit &&
+                   unlikely(tso_fragment(sk, skb, limit, mss_now)))
+                       return;
+
                /* Send it out now. */
                TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
@@ -1166,6 +1147,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
  */
 u32 __tcp_select_window(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        /* MSS for the peer's data.  Previous versions used mss_clamp
         * here.  I don't know if the value based on our guesses
@@ -1173,7 +1155,7 @@ u32 __tcp_select_window(struct sock *sk)
         * but may be worse for the performance because of rcv_mss
         * fluctuations.  --SAW  1998/11/1
         */
-       int mss = tp->ack.rcv_mss;
+       int mss = icsk->icsk_ack.rcv_mss;
        int free_space = tcp_space(sk);
        int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
        int window;
@@ -1182,7 +1164,7 @@ u32 __tcp_select_window(struct sock *sk)
                mss = full_space; 
 
        if (free_space < full_space/2) {
-               tp->ack.quick = 0;
+               icsk->icsk_ack.quick = 0;
 
                if (tcp_memory_pressure)
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
@@ -1257,7 +1239,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
                       tcp_skb_pcount(next_skb) != 1);
 
                /* Ok.  We will be able to collapse the packet. */
-               __skb_unlink(next_skb, next_skb->list);
+               __skb_unlink(next_skb, &sk->sk_write_queue);
 
                memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
 
@@ -1386,15 +1368,21 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
        if (skb->len > cur_mss) {
                int old_factor = tcp_skb_pcount(skb);
-               int new_factor;
+               int diff;
 
-               if (tcp_fragment(sk, skb, cur_mss))
+               if (tcp_fragment(sk, skb, cur_mss, cur_mss))
                        return -ENOMEM; /* We'll try again later. */
 
                /* New SKB created, account for it. */
-               new_factor = tcp_skb_pcount(skb);
-               tp->packets_out -= old_factor - new_factor;
-               tp->packets_out += tcp_skb_pcount(skb->next);
+               diff = old_factor - tcp_skb_pcount(skb) -
+                      tcp_skb_pcount(skb->next);
+               tp->packets_out -= diff;
+
+               if (diff > 0) {
+                       tp->fackets_out -= diff;
+                       if ((int)tp->fackets_out < 0)
+                               tp->fackets_out = 0;
+               }
        }
 
        /* Collapse two adjacent packets if worthwhile and we can. */
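
A worked example of the retransmit accounting in the hunk above, with hypothetical numbers: an skb counted as old_factor = 3 packets is refragmented at a smaller cur_mss, leaving tcp_skb_pcount(skb) = 1 and tcp_skb_pcount(skb->next) = 3. Then:

    diff = 3 - 1 - 3 = -1;      /* total pcount grew by one        */
    tp->packets_out -= diff;    /* i.e. packets_out increases by 1 */
    /* fackets_out is only reduced when diff > 0, clamped at zero. */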
@@ -1504,7 +1492,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
                                        if (skb ==
                                            skb_peek(&sk->sk_write_queue))
-                                               tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+                                               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                                                         inet_csk(sk)->icsk_rto);
                                }
 
                                packet_cnt -= tcp_skb_pcount(skb);
@@ -1557,7 +1546,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                        break;
 
                if (skb == skb_peek(&sk->sk_write_queue))
-                       tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 
                NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
        }
@@ -1793,8 +1782,8 @@ static inline void tcp_connect_init(struct sock *sk)
        tp->rcv_wup = 0;
        tp->copied_seq = 0;
 
-       tp->rto = TCP_TIMEOUT_INIT;
-       tp->retransmits = 0;
+       inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+       inet_csk(sk)->icsk_retransmits = 0;
        tcp_clear_retrans(tp);
 }
 
@@ -1837,7 +1826,7 @@ int tcp_connect(struct sock *sk)
        TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
        /* Timer for repeating the SYN until an answer. */
-       tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
        return 0;
 }
 
@@ -1847,20 +1836,21 @@ int tcp_connect(struct sock *sk)
  */
 void tcp_send_delayed_ack(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
-       int ato = tp->ack.ato;
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       int ato = icsk->icsk_ack.ato;
        unsigned long timeout;
 
        if (ato > TCP_DELACK_MIN) {
+               const struct tcp_sock *tp = tcp_sk(sk);
                int max_ato = HZ/2;
 
-               if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED))
+               if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
                        max_ato = TCP_DELACK_MAX;
 
                /* Slow path, intersegment interval is "high". */
 
                /* If some rtt estimate is known, use it to bound delayed ack.
-                * Do not use tp->rto here, use results of rtt measurements
+                * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
                 * directly.
                 */
                if (tp->srtt) {
@@ -1877,21 +1867,22 @@ void tcp_send_delayed_ack(struct sock *sk)
        timeout = jiffies + ato;
 
        /* Use new timeout only if there wasn't an older one earlier. */
-       if (tp->ack.pending&TCP_ACK_TIMER) {
+       if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If delack timer was blocked or is about to expire,
                 * send ACK now.
                 */
-               if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) {
+               if (icsk->icsk_ack.blocked ||
+                   time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
                        tcp_send_ack(sk);
                        return;
                }
 
-               if (!time_before(timeout, tp->ack.timeout))
-                       timeout = tp->ack.timeout;
+               if (!time_before(timeout, icsk->icsk_ack.timeout))
+                       timeout = icsk->icsk_ack.timeout;
        }
-       tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER;
-       tp->ack.timeout = timeout;
-       sk_reset_timer(sk, &tp->delack_timer, timeout);
+       icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+       icsk->icsk_ack.timeout = timeout;
+       sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
 }
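
The timer coalescing above keeps the earliest deadline: if a delayed-ACK timer is already pending and is blocked or due within ato/4, the ACK goes out immediately; otherwise the sooner of the two timeouts wins. For example, with ato = 40 ms and a pending timer 8 ms away, jiffies + (ato >> 2) = jiffies + 10 ms already covers it, so tcp_send_ack() runs now instead of rearming.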
 
 /* This routine sends an ack and also updates the window. */
@@ -1908,9 +1899,9 @@ void tcp_send_ack(struct sock *sk)
                 */
                buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
                if (buff == NULL) {
-                       tcp_schedule_ack(tp);
-                       tp->ack.ato = TCP_ATO_MIN;
-                       tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
+                       inet_csk_schedule_ack(sk);
+                       inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
                        return;
                }
 
@@ -1991,7 +1982,7 @@ int tcp_write_wakeup(struct sock *sk)
                            skb->len > mss) {
                                seg_size = min(seg_size, mss);
                                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-                               if (tcp_fragment(sk, skb, seg_size))
+                               if (tcp_fragment(sk, skb, seg_size, mss))
                                        return -1;
                                /* SWS override triggered forced fragmentation.
                                 * Disable TSO, the connection is too sick. */
@@ -2000,7 +1991,7 @@ int tcp_write_wakeup(struct sock *sk)
                                        sk->sk_route_caps &= ~NETIF_F_TSO;
                                }
                        } else if (!tcp_skb_pcount(skb))
-                               tcp_set_skb_tso_segs(sk, skb);
+                               tcp_set_skb_tso_segs(sk, skb, mss);
 
                        TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
                        TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2024,6 +2015,7 @@ int tcp_write_wakeup(struct sock *sk)
  */
 void tcp_send_probe0(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int err;
 
@@ -2032,16 +2024,16 @@ void tcp_send_probe0(struct sock *sk)
        if (tp->packets_out || !sk->sk_send_head) {
                /* Cancel probe timer, if it is not required. */
                tp->probes_out = 0;
-               tp->backoff = 0;
+               icsk->icsk_backoff = 0;
                return;
        }
 
        if (err <= 0) {
-               if (tp->backoff < sysctl_tcp_retries2)
-                       tp->backoff++;
+               if (icsk->icsk_backoff < sysctl_tcp_retries2)
+                       icsk->icsk_backoff++;
                tp->probes_out++;
-               tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 
-                                     min(tp->rto << tp->backoff, TCP_RTO_MAX));
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 
+                                         min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
        } else {
                /* If packet was not sent due to local congestion,
                 * do not backoff and do not remember probes_out.
@@ -2051,8 +2043,9 @@ void tcp_send_probe0(struct sock *sk)
                 */
                if (!tp->probes_out)
                        tp->probes_out=1;
-               tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 
-                                     min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL));
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 
+                                         min(icsk->icsk_rto << icsk->icsk_backoff,
+                                             TCP_RESOURCE_PROBE_INTERVAL));
        }
 }
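
The probe timer above backs off exponentially: each failed zero-window probe doubles the interval, capped at TCP_RTO_MAX. With icsk_rto = 200 ms, successive probes fire after 200 ms, 400 ms, 800 ms, ..., never later than 120 s. A standalone restatement (hypothetical helper):

    /* Illustrative: min(rto << backoff, cap), as used for ICSK_TIME_PROBE0. */
    static unsigned long probe0_timeout(unsigned long rto, unsigned int backoff,
                                        unsigned long cap)
    {
            unsigned long when = rto << backoff;
            return when < cap ? when : cap;
    }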
 