net/ipv4/tcp_input.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
EXPORT_SYMBOL(sysctl_tcp_reordering);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);

/* rfc5961 challenge ack rate limiting */
int sysctl_tcp_challenge_ack_limit = 100;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;

int sysctl_tcp_thin_dupack __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;

#define FLAG_DATA		0x01	/* Incoming frame contained data.	*/
#define FLAG_WIN_UPDATE		0x02	/* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04	/* This ACK acknowledged new data.	*/
#define FLAG_RETRANS_DATA_ACKED	0x08	/* "" "" some of which was retransmitted. */
#define FLAG_SYN_ACKED		0x10	/* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20	/* New SACK.				*/
#define FLAG_ECE		0x40	/* ECE in this ACK			*/
#define FLAG_SLOWPATH		0x100	/* Do not skip RFC checks for window update. */
#define FLAG_ORIG_SACK_ACKED	0x200	/* Never retransmitted data are (s)acked */
#define FLAG_SND_UNA_ADVANCED	0x400	/* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800	/* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING	0x2000	/* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT	0x4000	/* tcp_replace_ts_recent() */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make a more careful check taking into account
		 * that SACK blocks are variable.
		 *
		 * "len" is invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also invariant (if peer is RFC compliant),
			 * tcp header plus fixed timestamp option length.
			 * Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

static void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline bool tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (!(tp->ecn_flags & TCP_ECN_OK))
		return;

	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we have already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
			/* Better not delay acks, sender can have a very low cwnd */
			tcp_enter_quickack_mode((struct sock *)tp);
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		}
		/* fall through */
	default:
		tp->ecn_flags |= TCP_ECN_SEEN;
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return true;
	return false;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_sndbuf_expand(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int sndmem, per_mss;
	u32 nr_segs;

	/* Worst case is non GSO/TSO : each frame consumes one skb
	 * and skb->head is kmalloced using power of two area of memory
	 */
	per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
		  MAX_TCP_HEADER +
		  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	per_mss = roundup_pow_of_two(per_mss) +
		  SKB_DATA_ALIGN(sizeof(struct sk_buff));

	nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);

	/* Fast Recovery (RFC 5681 3.2) :
	 * Cubic needs 1.7 factor, rounded to 2 to include
	 * extra cushion (application might react slowly to POLLOUT)
	 */
	sndmem = 2 * nr_segs * per_mss;

	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}

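/* Worked example (illustrative; the numbers below are assumed, not taken from
 * a specific configuration): with an MSS of about 1460 bytes, per_mss grows to
 * MSS + MAX_TCP_HEADER + the aligned skb_shared_info, is rounded up to the
 * next power of two (roughly 2 KB here) and then gets the aligned sk_buff size
 * added.  With snd_cwnd at the initial window of TCP_INIT_CWND = 10 segments,
 * sndmem = 2 * 10 * per_mss, i.e. on the order of 45 KB of send buffer is
 * requested before sysctl_tcp_wmem[2] caps it.
 */
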
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: "network" buffer, allocated
 * forward and advertised in receiver window (tp->rcv_wnd) and
 * "application buffer", required to isolate scheduling/application
 * latencies from network.
 * window_clamp is maximal advertised window. It can be less than
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
 * is reserved for "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the viewpoint of the network, but the lower
 * the throughput and the higher the sensitivity of the connection to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

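/* Illustrative reading (assumed numbers, not from a specific config): the
 * loop halves the skb overhead estimate (truesize based) and the global
 * rmem[2] based "window" in lock-step.  If, before rcv_ssthresh overtakes the
 * shrinking window, the scaled overhead drops to no more than the payload
 * length, the skb is considered efficient enough and the caller may grow
 * rcv_ssthresh by 2 * rcv_mss; otherwise the function returns 0 and the
 * advertised window is left alone.
 */
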
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !sk_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	int rcvmem;

	rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
		 tcp_default_init_rwnd(mss);

	/* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
	 * Allow enough cushion so that sender is not limited by our window
	 */
	if (sysctl_tcp_moderate_rcvbuf)
		rcvmem <<= 2;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}

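/* Worked example (illustrative, assumed numbers): with advmss = 1460,
 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) comes to a little over 2 KB; assuming
 * tcp_default_init_rwnd() returns 10 segments, rcvmem starts around
 * 2 * 2 KB * 10 = 40 KB, and with tcp_moderate_rcvbuf enabled it is shifted
 * left by two to roughly 160 KB before being clamped to sysctl_tcp_rmem[2].
 */
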
/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_sndbuf_expand(sk);

	tp->rcvq_space.space = tp->rcv_wnd;
	tp->rcvq_space.time = tcp_time_stamp;
	tp->rcvq_space.seq = tp->copied_seq;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !sk_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about MSS used by the peer.
 * We haven't any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MSS_DEFAULT);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date.  A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else {
			m <<= 3;
			if (m < new_sample)
				new_sample = m;
		}
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	if (tp->rcv_rtt_est.rtt != new_sample)
		tp->rcv_rtt_est.rtt = new_sample;
}

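/* Illustrative arithmetic (assumed values): rcv_rtt_est.rtt is kept as
 * 8 * RTT in jiffies.  With timestamps (win_dep == 0), a stored value of
 * 800 (i.e. 100 ticks) and a new sample of 80 ticks gives
 * m = 80 - (800 >> 3) = -20, so new_sample becomes 780: an EWMA with gain 1/8.
 * Without timestamps (win_dep == 1) the same sample may only lower the
 * estimate: new_sample = min(800, 80 << 3) = 640.
 */
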
static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int copied;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
		return;

	/* Number of bytes copied to user in last RTT */
	copied = tp->copied_seq - tp->rcvq_space.seq;
	if (copied <= tp->rcvq_space.space)
		goto new_measure;

	/* A bit of theory :
	 * copied = bytes received in previous RTT, our base window
	 * To cope with packet losses, we need a 2x factor
	 * To cope with slow start, and sender growing its cwin by 100 %
	 * every RTT, we need a 4x factor, because the ACK we are sending
	 * now is for the next RTT, not the current one :
	 * <prev RTT . ><current RTT .. ><next RTT .... >
	 */

	if (sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvwin, rcvmem, rcvbuf;

		/* minimal window to cope with packet losses, assuming
		 * steady state. Add some cushion because of small variations.
		 */
		rcvwin = (copied << 1) + 16 * tp->advmss;

		/* If rate increased by 25%,
		 *	assume slow start, rcvwin = 3 * copied
		 * If rate increased by 50%,
		 *	assume sender can use 2x growth, rcvwin = 4 * copied
		 */
		if (copied >=
		    tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
			if (copied >=
			    tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
				rcvwin <<= 1;
			else
				rcvwin += (rcvwin >> 1);
		}

		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(rcvmem) < tp->advmss)
			rcvmem += 128;

		rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
		if (rcvbuf > sk->sk_rcvbuf) {
			sk->sk_rcvbuf = rcvbuf;

			/* Make the window clamp follow along. */
			tp->window_clamp = rcvwin;
		}
	}
	tp->rcvq_space.space = copied;

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}

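/* Worked example (illustrative, assumed numbers): if 100 KB were copied to
 * user space in the last RTT and rcvq_space.space was 60 KB, the base window
 * is rcvwin = 2 * 100 KB + 16 * advmss.  100 KB is more than a 50% increase
 * over 60 KB, so slow start is assumed and rcvwin is doubled.  rcvwin is then
 * translated from payload bytes into buffer bytes via the per-segment
 * truesize estimate (rcvmem) and clamped to sysctl_tcp_rmem[2] before being
 * written to sk->sk_rcvbuf.
 */
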
/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval.  When a
 * connection starts up, we want to ack as quickly as possible.  The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission.  This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time.  For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue.  -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

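/* Illustrative ATO trace (assumed values): with TCP_ATO_MIN worth 40 ms of
 * jiffies and a current ato of 40, a segment arriving 10 ms after the
 * previous one (m <= TCP_ATO_MIN / 2) holds ato at the floor:
 * ato = 40/2 + 20 = 40.  A gap of 30 ms (m < ato) gives ato = 40/2 + 30 = 50,
 * capped at icsk_rto.  A gap larger than the RTO instead re-enters quickack
 * mode, since the sender has apparently gone idle.
 */
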
/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt_us; /* RTT */
	u32 srtt = tp->srtt_us;

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible
	 *	m stands for "measurement".
	 *
	 *	On a 1990 paper the rto value is changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased quickly, decrease too quickly
	 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
	 * does not matter how to _calculate_ it. Seems, it was trap
	 * that VJ failed to avoid. 8)
	 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
		}
		tp->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev_us > tp->mdev_max_us) {
			tp->mdev_max_us = tp->mdev_us;
			if (tp->mdev_max_us > tp->rttvar_us)
				tp->rttvar_us = tp->mdev_max_us;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max_us < tp->rttvar_us)
				tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max_us = tcp_rto_min_us(sk);
		}
	} else {
		/* no previous measure. */
		srtt = m << 3;		/* take the measured time to be rtt */
		tp->mdev_us = m << 1;	/* make sure rto = 3*rtt */
		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
		tp->mdev_max_us = tp->rttvar_us;
		tp->rtt_seq = tp->snd_nxt;
	}
	tp->srtt_us = max(1U, srtt);
}

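/* Worked example (illustrative, assumed microsecond values): srtt_us holds
 * 8 * SRTT and mdev_us holds a scaled mean deviation.  Starting from
 * SRTT = 100 ms (srtt = 800000) and a new sample of 60 ms:
 * m = 60000 - (800000 >> 3) = -40000, so srtt becomes 760000, i.e. the
 * classic 7/8 * old + 1/8 * new giving SRTT = 95 ms.  The deviation update
 * mdev += |err| - (mdev >> 2) is the matching EWMA in the scaled domain,
 * with the extra m >>= 3 damping applied when the sample came in below the
 * estimate.  rttvar_us then tracks the maximum of mdev over the last round
 * trip and feeds the RTO as (srtt >> 3) + rttvar in __tcp_set_rto().
 */
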
/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
 * Note: TCP stack does not yet implement pacing.
 * FQ packet scheduler can be used to implement cheap but effective
 * TCP pacing, to smooth the burst on large writes when the number of
 * packets in flight is significantly lower than cwnd (or rwin)
 */
static void tcp_update_pacing_rate(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u64 rate;

	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
	rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3);

	rate *= max(tp->snd_cwnd, tp->packets_out);

	if (likely(tp->srtt_us))
		do_div(rate, tp->srtt_us);

	/* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
	 * without any lock. We want to make sure the compiler won't store
	 * intermediate values in this location.
	 */
	ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
						sk->sk_max_pacing_rate);
}

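/* Worked example (illustrative, assumed values): with mss_cache = 1448,
 * snd_cwnd = 100 and srtt_us = 8 * 20000 (a 20 ms SRTT), the raw rate is
 * 1448 * 2 * (USEC_PER_SEC << 3) * 100 / 160000 bytes/sec, i.e. about
 * 14.5 MB/s, which is twice the "one cwnd of data per RTT" rate, before
 * being clamped to sk_max_pacing_rate.
 */
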
/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less 50msec, it is hallucination.
	 *    It cannot be less due to utterly erratic ACK generation made
	 *    at least by solaris and freebsd. "Erratic ACKs" has _nothing_
	 *    to do with delayed acks, because at cwnd>2 true delack timeout
	 *    is invisible. Actually, Linux-2.4 also generates erratic
	 *    ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with correct one. It is exactly, which we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
}

/* Take note that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth to be remembered. 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
			 tp->reordering,
			 tp->fackets_out,
			 tp->sacked_out,
			 tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}

	if (metric > 0)
		tcp_disable_early_retrans(tp);
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((tp->retransmit_skb_hint == NULL) ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;

	if (!tp->lost_out ||
	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
					    struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R  1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is maximal distance, which a packet can be displaced
 * in packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included to the range though being valid because
 * it means that the receiver is rather inconsistent with itself reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is
 * perfectly valid, however, in light of RFC2018 which explicitly states
 * that "SACK block MUST reflect the newest segment. Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in case of head skb. Due to potential receiver driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * Implements also blockage to start_seq wrap-around. Problem lies in the
 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
 * there's no guarantee that it will be before snd_nxt (n). The problem
 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
 * wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
 * equal to the ideal case (infinite seqno space without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, a D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
				   u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return false;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return false;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is non-sensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return true;

	if (!is_dsack || !tp->undo_marker)
		return false;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (after(end_seq, tp->snd_una))
		return false;

	if (!before(start_seq, tp->undo_marker))
		return true;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return false;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 * start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}

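/* Illustrative cases (the sequence numbers are assumed): with snd_una = 1000,
 * snd_nxt = 5000 and undo_marker = 500, a SACK block [2000,3000) is accepted
 * by the "in outstanding window" test; a block [6000,7000) fails the
 * too-far-in-future check; and a D-SACK [600,900), which lies entirely below
 * snd_una but at or above undo_marker, is accepted by the D-SACK branch.
 */
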
/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
 * Event "B". Later note: FACK people cheated me again 8), we have to account
 * for reordering! Ugly, but should help.
 *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
 * less than what is now known to be received by the other end (derived from
 * highest SACK block). Also calculate the lowest snd_nxt among the remaining
 * retransmitted skbs to avoid some costly processing per ACKs.
 */
static void tcp_mark_lost_retrans(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;
	u32 new_low_seq = tp->snd_nxt;
	u32 received_upto = tcp_highest_sack_seq(tp);

	if (!tcp_is_fack(tp) || !tp->retrans_out ||
	    !after(received_upto, tp->lost_retrans_low) ||
	    icsk->icsk_ca_state != TCP_CA_Recovery)
		return;

	tcp_for_write_queue(skb, sk) {
		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;

		if (skb == tcp_send_head(sk))
			break;
		if (cnt == tp->retrans_out)
			break;
		if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			continue;

		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
			continue;

		/* TODO: We would like to get rid of tcp_is_fack(tp) only
		 * constraint here (see above) but figuring out that at
		 * least tp->reordering SACK blocks reside between ack_seq
		 * and received_upto is not easy task to do cheaply with
		 * the available datastructures.
		 *
		 * Whether FACK should check here for tp->reordering segs
		 * in-between one could argue for either way (it would be
		 * rather simple to implement as we could count fack_count
		 * during the walk and do tp->fackets_out - fack_count).
		 */
		if (after(received_upto, ack_seq)) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
			tp->retrans_out -= tcp_skb_pcount(skb);

			tcp_skb_mark_lost_uncond_verify(tp, skb);
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
		} else {
			if (before(ack_seq, new_low_seq))
				new_low_seq = ack_seq;
			cnt += tcp_skb_pcount(skb);
		}
	}

	if (tp->retrans_out)
		tp->lost_retrans_low = new_low_seq;
}

static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
			    struct tcp_sack_block_wire *sp, int num_sacks,
			    u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	bool dup_sack = false;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = true;
		tcp_dsack_seen(tp);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (!after(end_seq_0, end_seq_1) &&
		    !before(start_seq_0, start_seq_1)) {
			dup_sack = true;
			tcp_dsack_seen(tp);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPDSACKOFORECV);
		}
	}

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans--;

	return dup_sack;
}

struct tcp_sacktag_state {
	int	reord;
	int	fack_count;
	long	rtt_us; /* RTT measured by SACKing never-retransmitted data */
	int	flag;
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and create some hassle (caller must handle error case
 * returns).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int err;
	bool in_sack;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;
			if (!in_sack && new_len < pkt_len) {
				new_len += mss;
				if (new_len >= skb->len)
					return 0;
			}
			pkt_len = new_len;
		}
		err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	return in_sack;
}

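/* Worked example (illustrative, assumed numbers): consider a GSO skb covering
 * sequence range [1000, 5000) with mss = 1000, and a SACK block that starts
 * at 2500.  in_sack is false, so pkt_len = 2500 - 1000 = 1500, which is then
 * rounded up to the next MSS boundary, new_len = 2000, and tcp_fragment()
 * splits the skb there so that later tagging operates on full-MSS pieces.
 */
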
/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk,
			  struct tcp_sacktag_state *state, u8 sacked,
			  u32 start_seq, u32 end_seq,
			  int dup_sack, int pcount,
			  const struct skb_mstamp *xmit_time)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int fack_count = state->fack_count;

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (tp->undo_marker && tp->undo_retrans &&
		    after(end_seq, tp->undo_marker))
			tp->undo_retrans--;
		if (sacked & TCPCB_SACKED_ACKED)
			state->reord = min(fack_count, state->reord);
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New sack for not retransmitted frame,
				 * which was in hole. It is reordering.
				 */
				if (before(start_seq,
					   tcp_highest_sack_seq(tp)))
					state->reord = min(fack_count,
							   state->reord);
				if (!after(end_seq, tp->high_seq))
					state->flag |= FLAG_ORIG_SACK_ACKED;
				/* Pick the earliest sequence sacked for RTT */
				if (state->rtt_us < 0) {
					struct skb_mstamp now;

					skb_mstamp_get(&now);
					state->rtt_us = skb_mstamp_us_delta(&now,
									    xmit_time);
				}
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;

		fack_count += pcount;

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;

		if (fack_count > tp->fackets_out)
			tp->fackets_out = fack_count;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear it. undo_retrans is decreased above, L|R frames
	 * are accounted above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}

/* Shift newly-SACKed bytes from this skb to the immediately previous
 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
 */
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
			    struct tcp_sacktag_state *state,
			    unsigned int pcount, int shifted, int mss,
			    bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */

	BUG_ON(!pcount);

	/* Adjust counters and hints for the newly sacked sequence
	 * range but discard the return value since prev is already
	 * marked. We must tag the range first because the seq
	 * advancement below implicitly advances
	 * tcp_highest_sack_seq() when skb is highest_sack.
	 */
	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
			start_seq, end_seq, dup_sack, pcount,
			&skb->skb_mstamp);

	if (skb == tp->lost_skb_hint)
		tp->lost_cnt_hint += pcount;

	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	skb_shinfo(prev)->gso_segs += pcount;
	BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
	skb_shinfo(skb)->gso_segs -= pcount;

	/* When we're adding to gso_segs == 1, gso_size will be zero,
	 * in theory this shouldn't be necessary but as long as DSACK
	 * code can come after this skb later on it's better to keep
	 * setting gso_size to something.
	 */
	if (!skb_shinfo(prev)->gso_size) {
		skb_shinfo(prev)->gso_size = mss;
		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
	}

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (skb_shinfo(skb)->gso_segs <= 1) {
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	}

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return false;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		TCP_SKB_CB(prev)->end_seq++;

	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_unlink_write_queue(skb, sk);
	sk_wmem_free_skb(sk, skb);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);

	return true;
}

/* I wish gso_size would have a bit more sane initialization than
 * something-or-zero which complicates things
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
	return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

/* Try collapsing SACK blocks spanning across multiple skbs to a single
 * skb.
 */
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
					  struct tcp_sacktag_state *state,
					  u32 start_seq, u32 end_seq,
					  bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev;
	int mss;
	int pcount = 0;
	int len;
	int in_sack;

	if (!sk_can_gso(sk))
		goto fallback;

	/* Normally R but no L won't result in plain S */
	if (!dup_sack &&
	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
		goto fallback;
	if (!skb_can_shift(skb))
		goto fallback;
	/* This frame is about to be dropped (was ACKed). */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
		goto fallback;

	/* Can only happen with delayed DSACK + discard craziness */
	if (unlikely(skb == tcp_write_queue_head(sk)))
		goto fallback;
	prev = tcp_write_queue_prev(sk, skb);

	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
		goto fallback;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (in_sack) {
		len = skb->len;
		pcount = tcp_skb_pcount(skb);
		mss = tcp_skb_seglen(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;
	} else {
		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
			goto noop;
		/* CHECKME: This is non-MSS split case only?, this will
		 * cause skipped skbs due to advancing loop btw, original
		 * has that feature too
		 */
		if (tcp_skb_pcount(skb) <= 1)
			goto noop;

		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
		if (!in_sack) {
			/* TODO: head merge to next could be attempted here
			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
			 * though it might not be worth of the additional hassle
			 *
			 * ...we can probably just fallback to what was done
			 * previously. We could try merging non-SACKed ones
			 * as well but it probably isn't going to buy off
			 * because later SACKs might again split them, and
			 * it would make skb timestamp tracking considerably
			 * harder problem.
			 */
			goto fallback;
		}

		len = end_seq - TCP_SKB_CB(skb)->seq;
		BUG_ON(len < 0);
		BUG_ON(len > skb->len);

		/* MSS boundaries should be honoured or else pcount will
		 * severely break even though it makes things bit trickier.
		 * Optimize common case to avoid most of the divides
		 */
		mss = tcp_skb_mss(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;

		if (len == mss) {
			pcount = 1;
		} else if (len < mss) {
			goto noop;
		} else {
			pcount = len / mss;
			len = pcount * mss;
		}
	}

	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
		goto fallback;

	if (!skb_shift(prev, skb, len))
		goto fallback;
	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
		goto out;

	/* Hole filled allows collapsing with the next as well, this is very
	 * useful when hole on every nth skb pattern happens
	 */
	if (prev == tcp_write_queue_tail(sk))
		goto out;
	skb = tcp_write_queue_next(sk, prev);

	if (!skb_can_shift(skb) ||
	    (skb == tcp_send_head(sk)) ||
	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
	    (mss != tcp_skb_seglen(skb)))
		goto out;

	len = skb->len;
	if (skb_shift(prev, skb, len)) {
		pcount += tcp_skb_pcount(skb);
		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
	}

out:
	state->fack_count += pcount;
	return prev;

noop:
	return skb;

fallback:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
	return NULL;
}

1504
68f8353b
IJ
1505static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1506 struct tcp_sack_block *next_dup,
a1197f5a 1507 struct tcp_sacktag_state *state,
68f8353b 1508 u32 start_seq, u32 end_seq,
a2a385d6 1509 bool dup_sack_in)
68f8353b 1510{
832d11c5
IJ
1511 struct tcp_sock *tp = tcp_sk(sk);
1512 struct sk_buff *tmp;
1513
68f8353b
IJ
1514 tcp_for_write_queue_from(skb, sk) {
1515 int in_sack = 0;
a2a385d6 1516 bool dup_sack = dup_sack_in;
68f8353b
IJ
1517
1518 if (skb == tcp_send_head(sk))
1519 break;
1520
1521 /* queue is in-order => we can short-circuit the walk early */
1522 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1523 break;
1524
1525 if ((next_dup != NULL) &&
1526 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1527 in_sack = tcp_match_skb_to_sack(sk, skb,
1528 next_dup->start_seq,
1529 next_dup->end_seq);
1530 if (in_sack > 0)
a2a385d6 1531 dup_sack = true;
68f8353b
IJ
1532 }
1533
832d11c5
IJ
1534 /* skb reference here is a bit tricky to get right, since
1535 * shifting can eat and free both this skb and the next,
1536 * so not even _safe variant of the loop is enough.
1537 */
1538 if (in_sack <= 0) {
a1197f5a
IJ
1539 tmp = tcp_shift_skb_data(sk, skb, state,
1540 start_seq, end_seq, dup_sack);
832d11c5
IJ
1541 if (tmp != NULL) {
1542 if (tmp != skb) {
1543 skb = tmp;
1544 continue;
1545 }
1546
1547 in_sack = 0;
1548 } else {
1549 in_sack = tcp_match_skb_to_sack(sk, skb,
1550 start_seq,
1551 end_seq);
1552 }
1553 }
1554
68f8353b
IJ
1555 if (unlikely(in_sack < 0))
1556 break;
1557
832d11c5 1558 if (in_sack) {
cc9a672e
NC
1559 TCP_SKB_CB(skb)->sacked =
1560 tcp_sacktag_one(sk,
1561 state,
1562 TCP_SKB_CB(skb)->sacked,
1563 TCP_SKB_CB(skb)->seq,
1564 TCP_SKB_CB(skb)->end_seq,
1565 dup_sack,
59c9af42 1566 tcp_skb_pcount(skb),
740b0f18 1567 &skb->skb_mstamp);
68f8353b 1568
832d11c5
IJ
1569 if (!before(TCP_SKB_CB(skb)->seq,
1570 tcp_highest_sack_seq(tp)))
1571 tcp_advance_highest_sack(sk, skb);
1572 }
1573
a1197f5a 1574 state->fack_count += tcp_skb_pcount(skb);
68f8353b
IJ
1575 }
1576 return skb;
1577}
1578
/* Avoid all extra work that is being done by sacktag while walking in
 * a normal way
 */
static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
					struct tcp_sacktag_state *state,
					u32 skip_to_seq)
{
	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;

		if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
			break;

		state->fack_count += tcp_skb_pcount(skb);
	}
	return skb;
}

static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
						struct sock *sk,
						struct tcp_sack_block *next_dup,
						struct tcp_sacktag_state *state,
						u32 skip_to_seq)
{
	if (next_dup == NULL)
		return skb;

	if (before(next_dup->start_seq, skip_to_seq)) {
		skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
		skb = tcp_sacktag_walk(skb, sk, NULL, state,
				       next_dup->start_seq, next_dup->end_seq,
				       1);
	}

	return skb;
}

static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}

1621
1da177e4 1622static int
cf533ea5 1623tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
740b0f18 1624 u32 prior_snd_una, long *sack_rtt_us)
1da177e4
LT
1625{
1626 struct tcp_sock *tp = tcp_sk(sk);
cf533ea5
ED
1627 const unsigned char *ptr = (skb_transport_header(ack_skb) +
1628 TCP_SKB_CB(ack_skb)->sacked);
fd6dad61 1629 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
4389dded 1630 struct tcp_sack_block sp[TCP_NUM_SACKS];
68f8353b 1631 struct tcp_sack_block *cache;
a1197f5a 1632 struct tcp_sacktag_state state;
68f8353b 1633 struct sk_buff *skb;
4389dded 1634 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
fd6dad61 1635 int used_sacks;
a2a385d6 1636 bool found_dup_sack = false;
68f8353b 1637 int i, j;
fda03fbb 1638 int first_sack_index;
1da177e4 1639
a1197f5a
IJ
1640 state.flag = 0;
1641 state.reord = tp->packets_out;
740b0f18 1642 state.rtt_us = -1L;
a1197f5a 1643
d738cd8f 1644 if (!tp->sacked_out) {
de83c058
IJ
1645 if (WARN_ON(tp->fackets_out))
1646 tp->fackets_out = 0;
6859d494 1647 tcp_highest_sack_reset(sk);
d738cd8f 1648 }
1da177e4 1649
1ed83465 1650 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
d06e021d
DM
1651 num_sacks, prior_snd_una);
1652 if (found_dup_sack)
a1197f5a 1653 state.flag |= FLAG_DSACKING_ACK;
6f74651a
BE
1654
1655 /* Eliminate too old ACKs, but take into
1656 * account more or less fresh ones, they can
1657 * contain valid SACK info.
1658 */
1659 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1660 return 0;
1661