tcp: refine TSO autosizing
[deliverable/linux.git] / net / ipv4 / tcp.c
index dc13a3657e8e1b81ba0cb1fcd5386a9d0b106168..427aee33ffc04ad189d9d0ec24ab8004c25961ec 100644 (file)
@@ -835,47 +835,29 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                                       int large_allowed)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 xmit_size_goal, old_size_goal;
-
-       xmit_size_goal = mss_now;
-
-       if (large_allowed && sk_can_gso(sk)) {
-               u32 gso_size, hlen;
-
-               /* Maybe we should/could use sk->sk_prot->max_header here ? */
-               hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
-                      inet_csk(sk)->icsk_ext_hdr_len +
-                      tp->tcp_header_len;
-
-               /* Goal is to send at least one packet per ms,
-                * not one big TSO packet every 100 ms.
-                * This preserves ACK clocking and is consistent
-                * with tcp_tso_should_defer() heuristic.
-                */
-               gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
-               gso_size = max_t(u32, gso_size,
-                                sysctl_tcp_min_tso_segs * mss_now);
-
-               xmit_size_goal = min_t(u32, gso_size,
-                                      sk->sk_gso_max_size - 1 - hlen);
-
-               xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-
-               /* We try hard to avoid divides here */
-               old_size_goal = tp->xmit_size_goal_segs * mss_now;
-
-               if (likely(old_size_goal <= xmit_size_goal &&
-                          old_size_goal + mss_now > xmit_size_goal)) {
-                       xmit_size_goal = old_size_goal;
-               } else {
-                       tp->xmit_size_goal_segs =
-                               min_t(u16, xmit_size_goal / mss_now,
-                                     sk->sk_gso_max_segs);
-                       xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
-               }
+       u32 new_size_goal, size_goal, hlen;
+
+       /* Without GSO support, each skb carries exactly one MSS. */
+       if (!large_allowed || !sk_can_gso(sk))
+               return mss_now;
+
+       /* Maybe we should/could use sk->sk_prot->max_header here ? */
+       hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
+              inet_csk(sk)->icsk_ext_hdr_len +
+              tp->tcp_header_len;
+
+       /* Start from the largest payload a single GSO skb can carry on
+        * this device, then bound it (tcp_bound_to_half_wnd() is defined
+        * elsewhere — presumably caps to half the window; confirm there).
+        */
+       new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+       new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
+
+       /* We try hard to avoid divides here */
+       size_goal = tp->gso_segs * mss_now;
+       /* Only pay for a divide when the cached tp->gso_segs goal has
+        * drifted out of the window (new_size_goal - mss_now, new_size_goal].
+        */
+       if (unlikely(new_size_goal < size_goal ||
+                    new_size_goal >= size_goal + mss_now)) {
+               tp->gso_segs = min_t(u16, new_size_goal / mss_now,
+                                    sk->sk_gso_max_segs);
+               size_goal = tp->gso_segs * mss_now;
+       }
 
-       return max(xmit_size_goal, mss_now);
+       /* Never return less than one MSS. */
+       return max(size_goal, mss_now);
 }
 
 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
This page took 0.031256 seconds and 5 git commands to generate.