/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = 512;

/* By default, apply RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	/* Don't override Nagle indefinitely with F-RTO */
	if (tp->frto_counter == 2)
		tp->frto_counter = 3;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not follow rule 3; we advertise an MSS calculated from the
 *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
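
/* Illustration of the decay in tcp_cwnd_restart(): with icsk_rto at
 * 200 ms and roughly 700 ms of idle time, the RTO is subtracted from
 * delta three times before it goes negative, so cwnd is halved three
 * times (as long as it stays above restart_cwnd) and is then clamped
 * to at least the restart window.
 */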

static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value large enough for senders following
	 * RFC2414; senders not following that RFC will be satisfied with 2.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460 * 3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd * mss)
			*rcv_wnd = init_cwnd * mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
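
/* Illustration of the scaling loop in tcp_select_initial_window(): a
 * 1 MB buffer (space = 1048576) is shifted right five times before it
 * fits in 16 bits (1048576 >> 5 = 32768), giving rcv_wscale = 5. The
 * RFC1323 cap of 14 bounds the scaled window at 65535 << 14, just
 * under 1 GB.
 */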

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}
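
/* A note on the never-shrink branch in tcp_select_window(): ALIGN()
 * rounds cur_win up to a multiple of 1 << rcv_wscale, so after the
 * right shift the advertised value still covers the window the peer
 * was already promised. For instance, with rcv_wscale = 2 a cur_win
 * of 5001 is rounded to 5004 and advertised as 1251.
 */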

static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}

static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sysctl_tcp_ecn) {
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->csum = 0;

	TCP_SKB_CB(skb)->flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
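
/* SYN and FIN each consume one unit of sequence space, which is why
 * end_seq is seq + 1 for them above; a bare ACK carries no data and
 * ends with end_seq == seq.
 */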

static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
					 __u32 tstamp, __u8 **md5_hash)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) |
			       TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
#ifdef CONFIG_TCP_MD5SIG
	if (md5_hash) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		*md5_hash = (__u8 *)ptr;
	}
#endif
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 *
 * Note - that with the RFC2385 TCP option, we make room for the
 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
 * location to be filled is passed back up.
 */
static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
				  int offer_wscale, int wscale, __u32 tstamp,
				  __u32 ts_recent, __u8 **md5_hash)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised. But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary. If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		else
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       (wscale));
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * If MD5 is enabled, then we set the option, and include the size
	 * (always 18). The actual MD5 hash is added just before the
	 * packet is sent.
	 */
	if (md5_hash) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		*md5_hash = (__u8 *)ptr;
	}
#endif
}
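
/* Example layout from tcp_syn_build_options() for a SYN carrying MSS,
 * timestamps, SACK-OK and window scaling: one 4 byte MSS word, the
 * SACK_PERM kind folded into the timestamp word plus TSVAL/TSECR
 * (12 bytes), and a NOP-padded window scale word (4 bytes): 20 option
 * bytes in total, i.e. a 40 byte TCP header.
 */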

/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless. It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	int tcp_header_size;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *md5;
	__u8 *md5_hash_location;
#endif
	struct tcphdr *th;
	int sysctl_flags;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

	sysctl_flags = 0;
	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
		if (sysctl_tcp_timestamps) {
			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
		}
		if (sysctl_tcp_window_scaling) {
			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_WSCALE;
		}
		if (sysctl_tcp_sack) {
			sysctl_flags |= SYSCTL_FLAG_SACK;
			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
		}
	} else if (unlikely(tp->rx_opt.eff_sacks)) {
		/* A SACK is 2 pad bytes, a 2 byte header, plus
		 * 2 32-bit sequence numbers for each SACK block.
		 */
		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
				    (tp->rx_opt.eff_sacks *
				     TCPOLEN_SACK_PERBLOCK));
	}

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * Are we doing MD5 on this segment? If so - make
	 * room for it.
	 */
	md5 = tp->af_specific->md5_lookup(sk, sk);
	if (md5) {
		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}
#endif

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->sport;
	th->dest		= inet->dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	if (unlikely(tp->urg_mode &&
		     between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
		th->urg_ptr		= htons(tp->snd_up - tcb->seq);
		th->urg			= 1;
	}

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_syn_build_options((__be32 *)(th + 1),
				      tcp_advertise_mss(sk),
				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
				      (sysctl_flags & SYSCTL_FLAG_SACK),
				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
				      tp->rx_opt.rcv_wscale,
				      tcb->when,
				      tp->rx_opt.ts_recent,
#ifdef CONFIG_TCP_MD5SIG
				      md5 ? &md5_hash_location :
#endif
				      NULL);
	} else {
		tcp_build_and_update_options((__be32 *)(th + 1),
					     tp, tcb->when,
#ifdef CONFIG_TCP_MD5SIG
					     md5 ? &md5_hash_location :
#endif
					     NULL);
		TCP_ECN_send(sk, skb, tcp_header_size);
	}

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		tp->af_specific->calc_md5_hash(md5_hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}

/* This routine just queues the buffer.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u16 flags;

	BUG_ON(len > skb->len);

	tcp_clear_retrans_hints_partial(tp);
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the "when" of
	 * skbs which it has never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			   tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			tp->lost_out -= diff;

		/* Adjust Reno SACK estimate. */
		if (tcp_is_reno(tp) && diff > 0) {
			tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
			tcp_verify_left_out(tp);
		}
		tcp_adjust_fackets_out(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
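
/* Example of the counter fixup in tcp_fragment(): splitting a skb of
 * three segments (old_factor = 3) at half an MSS leaves pcounts of
 * 1 and 3 (DIV_ROUND_UP rounds the 2.5 MSS remainder up), so
 * diff = 3 - 1 - 3 = -1 and packets_out grows by one, matching the
 * extra segment the split created.
 */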

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize -= len;
	sk->sk_wmem_queued -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	 * It is MMS_S - sizeof(tcphdr) of rfc1122.
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}
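
/* Worked example: for IPv4 with a 1500 byte PMTU, mss_now starts at
 * 1500 - 20 - 20 = 1460; with timestamps negotiated, tcp_header_len
 * is 20 + 12, so the final subtraction leaves 1448 bytes of payload
 * per segment.
 */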

/* Inverse of the above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}

/* Bound MSS / TSO packet size with half of the window */
static int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
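
/* The 68 above is the minimum IPv4 MTU from RFC 791: even when the
 * peer's window is tiny, segments are kept at least 68 bytes minus
 * the TCP header length.
 */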

/* This function synchronizes snd mss to current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT count
 * for TCP options, but includes only bare TCP header.
 *
 * tp->rx_opt.mss_clamp is mss negotiated at connection setup.
 * It is minimum of user_mss and mss received with SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
 *
 * tp->mss_cache is current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated,
 * taking into account current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

#ifdef CONFIG_TCP_MD5SIG
	if (tp->af_specific->md5_lookup(sk, sk))
		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
#endif

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request the caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting the write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always an MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
					unsigned int mss_now, unsigned int cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 needed, window, cwnd_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	cwnd_len = mss_now * cwnd;

	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
		return cwnd_len;

	needed = min(skb->len, window);

	if (cwnd_len <= needed)
		return cwnd_len;

	return needed - needed % mss_now;
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules? If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
					 struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN. */
	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
	    tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
	       !after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0 if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
}
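
/* With Minshall's modification, tcp_minshall_check() only blocks a
 * sub-MSS segment while an earlier small segment (up to snd_sml) is
 * still unacknowledged; once that is ACKed, another small segment may
 * be sent even though full-sized data is still in flight.
 */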

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* The Nagle rule does not apply to frames which sit in the middle of
	 * the write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).
	 * Nagle can be ignored during F-RTO too (see RFC4138).
	 */
	if (tp->urg_mode || (tp->frto_counter == 2) ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
				   unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks whether the data-bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now. If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

int tcp_may_send_now(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?