/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *		Alan Cox	:	All icmp error handling was broken:
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skip them.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *	Charles Hedrick		:	TCP fixes
 *	Toomas Tamm		:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *	Charles Hedrick		:	Rewrote most of it to actually work
 *	Linus			:	Rewrote tcp_read() and URG handling
 *					(both 5X faster)
 *	Gerhard Koerting	:	Fixed some missing timer handling
 *	Matthew Dillon		:	Reworked TCP machine states as per RFC
 *	Gerhard Koerting	:	PC/TCP workarounds
 *	Adam Caldwell		:	Assorted timer/timing errors
 *	Matthew Dillon		:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *	Arnt Gulbrandsen	:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *	Michael Riepe		:	Bug in tcp_check()
 *	Matt Dillon		:	More TCP improvements and RST bug fixes
 *	Matt Dillon		:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *	Michael Pall		:	Handle poll() after URG properly in
 *					all cases.
 *	Michael Pall		:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *	Michael Pall		:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					properly in 3 cases.
 *	Michael Pall		:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *	Yury Shevchuk		:	Really fixed accept() blocking problem.
 *	Craig I. Hagan		:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *	Ross Biro		:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFCs. For other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *	A.N.Kuznetsov		:	Don't time wait on completion of tidy
 *					close.
 *	Linus Torvalds		:	Fin/Shutdown & copied_seq changes.
 *	Linus Torvalds		:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *	Jorge Cwik		:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *	Matt Day		:	poll()->select() match BSD precisely
 *					on error
 *		Alan Cox	:	New buffers
 *	Marc Tamsky		:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *	Mark Yarvis		:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink
 *					right but it's a start!
 *	Marc Tamsky		:	Closing in closing fixes.
 *	Mike Shaver		:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *	Alexey Kuznetsov	:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *	Marc Tamsky		:	TCP_CLOSE fix.
 *	Colin (G3TNE)		:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *	Pedro Roque		:	Better TCP window handling, delayed ack.
 *	Joerg Reuter		:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *	Eric Schenk		:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *	Keith Owens		:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *	Eric Schenk		:	Fix fast close down bug with
 *					shutdown() followed by close().
 *	Andi Kleen		:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
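
/*
 * For quick orientation, an illustrative (non-normative) sketch of the
 * common paths through the states above: the side that closes first walks
 *
 *	ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE
 *
 * while the side that receives that FIN walks
 *
 *	ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *
 * A simultaneous close passes through CLOSING instead of FIN_WAIT2.
 */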
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

int sysctl_tcp_min_tso_segs __read_mostly = 2;

int sysctl_tcp_autocorking __read_mostly = 1;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
 * TCP splice context (the len and flags members are assumed from how
 * tcp_splice_read() initializes this struct below).
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};
/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All of __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);
/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}
/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
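
/*
 * Worked example (illustrative; assumes a 1 second initial timeout and a
 * 120 second rto_max): retransmission periods double as 1, 2, 4, ...
 * seconds, so the cumulative wait after N retransmits is 1, 3, 7, ...
 * Hence secs_to_retrans(7, 1, 120) == 3, and conversely
 * retrans_to_secs(3, 1, 120) == 7.
 */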
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	__skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);
	INIT_LIST_HEAD(&tp->tsq_node);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	tcp_enable_early_retrans(tp);
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	sock_update_memcg(sk);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);
void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	sock_tx_timestamp(sk, &shinfo->tx_flags);
	if (shinfo->tx_flags & SKBTX_ANY_SW_TSTAMP)
		shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
}
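
/*
 * Example (illustrative only): for an skb carrying bytes with sequence
 * numbers 1000..1099 (seq == 1000, len == 100), tskey becomes 1099, the
 * sequence number of the last payload byte, so a software timestamp can
 * later be correlated with the fate of that byte.
 */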
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);

	sock_rps_record_flow(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (sk->sk_state != TCP_SYN_SENT &&
	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
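
/*
 * A minimal user-space sketch of how the bits computed above surface
 * (fd is an assumed connected TCP socket; this is not kernel code):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	int n = poll(&pfd, 1, -1);
 *
 * On return, POLLIN in pfd.revents means at least SO_RCVLOWAT bytes are
 * queued (read() would not block), POLLOUT means write space is
 * available, and POLLHUP means shutdown happened in both directions or
 * the socket is in TCP_CLOSE.
 */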
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN was received */
			if (answ && sock_flag(sk, SOCK_DONE))
				answ--;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
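
/*
 * User-space view of the ioctls handled above (a sketch, assuming an
 * established TCP socket in fd):
 *
 *	int unread, unsent;
 *	ioctl(fd, SIOCINQ, &unread);	// bytes readable (FIONREAD alias)
 *	ioctl(fd, SIOCOUTQ, &unsent);	// write_seq - snd_una: queued + unacked
 *	ioctl(fd, SIOCOUTQNSD, &unsent); // write_seq - snd_nxt: not yet sent
 */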
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}
/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       sysctl_tcp_autocorking &&
	       skb != tcp_write_queue_head(sk) &&
	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
}
static void tcp_push(struct sock *sk, int flags, int mss_now,
		     int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (!tcp_send_head(sk))
		return;

	skb = tcp_write_queue_tail(sk);
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}
/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
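
/*
 * A user-space sketch of the zero-copy path this enables (assumes sock_fd
 * is a connected TCP socket and file_fd a writable file; error handling
 * elided):
 *
 *	int p[2];
 *	pipe(p);
 *	ssize_t n = splice(sock_fd, NULL, p[1], NULL, 65536, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(p[0], NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 *
 * The socket's pages are moved into the pipe without copying through a
 * user buffer.
 */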
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned.  */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->reserved_tailroom = skb->end - skb->tail - size;
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		u32 gso_size, hlen;

		/* Maybe we should/could use sk->sk_prot->max_header here ? */
		hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
		       inet_csk(sk)->icsk_ext_hdr_len +
		       tp->tcp_header_len;

		/* Goal is to send at least one packet per ms,
		 * not one big TSO packet every 100 ms.
		 * This preserves ACK clocking and is consistent
		 * with tcp_tso_should_defer() heuristic.
		 */
		gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
		gso_size = max_t(u32, gso_size,
				 sysctl_tcp_min_tso_segs * mss_now);

		xmit_size_goal = min_t(u32, gso_size,
				       sk->sk_gso_max_size - 1 - hlen);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs =
				min_t(u16, xmit_size_goal / mss_now,
				      sk->sk_gso_max_segs);
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
				size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;
	}

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		int copy, i;
		bool can_coalesce;

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		if (!(size -= copy)) {
			tcp_tx_timestamp(sk, skb);
			goto out;
		}

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}
int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t res;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);
	res = do_tcp_sendpages(sk, page, offset, size, flags);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(tcp_sendpage);
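
/*
 * do_tcp_sendpages() is what ultimately services a user-space
 * sendfile(2) on a TCP socket. A sketch of the caller's view (sock_fd
 * connected, file_fd a regular file; error handling elided):
 *
 *	off_t off = 0;
 *	sendfile(sock_fd, file_fd, &off, count);
 *
 * File pages are attached to skbs as page fragments instead of being
 * copied through a user buffer.
 */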
static inline int select_size(const struct sock *sk, bool sg)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk)) {
			/* Small frames won't use a full page:
			 * Payload will immediately follow tcp header.
			 */
			tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
		} else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}
void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req != NULL) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}
static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				int *copied, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int err, flags;

	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
		return -EOPNOTSUPP;
	if (tp->fastopen_req != NULL)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(tp->fastopen_req == NULL))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;

	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
				    msg->msg_namelen, flags);
	*copied = tp->fastopen_req->copied;
	tcp_free_fastopen_req(tp);
	return err;
}
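
/*
 * Client-side Fast Open as user space drives it (a sketch; requires the
 * client bit of the tcp_fastopen sysctl, see TFO_CLIENT_ENABLE above):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * The sendto() replaces connect()+send(): the data rides on the SYN when
 * a Fast Open cookie is available, otherwise the stack falls back to a
 * normal handshake.
 */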
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
	bool sg;
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	if (flags & MSG_FASTOPEN) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
		offset = copied_syn;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto do_error;
	}

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out_nopush;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	sg = !!(sk->sk_route_caps & NETIF_F_SG);

	while (--iovlen >= 0) {
		size_t seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;
		if (unlikely(offset > 0)) {  /* Skip bytes copied in SYN */
			if (offset >= seglen) {
				offset -= seglen;
				continue;
			}
			seglen -= offset;
			from += offset;
			offset = 0;
		}

		while (seglen > 0) {
			int copy = 0;
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}

			if (copy <= 0) {
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * All packets are restored as if they have
				 * already been sent.
				 */
				if (tp->repair)
					TCP_SKB_CB(skb)->when = tcp_time_stamp;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
				max = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_availroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				copy = min_t(int, copy, skb_availroom(skb));
				err = skb_add_data_nocache(sk, skb, from, copy);
				if (err)
					goto do_fault;
			} else {
				bool merge = true;
				int i = skb_shinfo(skb)->nr_frags;
				struct page_frag *pfrag = sk_page_frag(sk);

				if (!sk_page_frag_refill(sk, pfrag))
					goto wait_for_memory;

				if (!skb_can_coalesce(skb, i, pfrag->page,
						      pfrag->offset)) {
					if (i == MAX_SKB_FRAGS || !sg) {
						tcp_mark_push(tp, skb);
						goto new_segment;
					}
					merge = false;
				}

				copy = min_t(int, copy, pfrag->size - pfrag->offset);

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				err = skb_copy_to_page_nocache(sk, from, skb,
							       pfrag->page,
							       pfrag->offset,
							       copy);
				if (err)
					goto do_error;

				/* Update the skb. */
				if (merge) {
					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
				} else {
					skb_fill_page_desc(skb, i, pfrag->page,
							   pfrag->offset, copy);
					get_page(pfrag->page);
				}
				pfrag->offset += copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0) {
				tcp_tx_timestamp(sk, skb);
				goto out;
			}

			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now,
					 TCP_NAGLE_PUSH, size_goal);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
out_nopush:
	release_sock(sk);
	return copied + copied_syn;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied + copied_syn)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(tcp_sendmsg);
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */
static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
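
/*
 * User-space counterpart of the semantics above (a sketch): with
 * SO_OOBINLINE off, the single urgent byte is read out of band and the
 * call never blocks:
 *
 *	char c;
 *	int n = recv(fd, &c, 1, MSG_OOB);
 *	// n == 1: got the urgent byte; -1 with EAGAIN: none pending,
 *	// even on a blocking socket.
 */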
static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0, err = 0;

	/* XXX -- need to support SO_PEEK_OFF */

	skb_queue_walk(&sk->sk_write_queue, skb) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
		if (err)
			break;

		copied += skb->len;
	}

	return err ?: copied;
}
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool time_to_ack = false;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = true;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = true;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}
#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
	dma_cookie_t done, used;
	dma_cookie_t last_issued;
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan)
		return;

	last_issued = tp->ucopy.dma_cookie;
	dma_async_issue_pending(tp->ucopy.dma_chan);

	do {
		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
					     last_issued, &done,
					     &used) == DMA_COMPLETE) {
			/* Safe to free early-copied skbs now */
			__skb_queue_purge(&sk->sk_async_wait_queue);
			break;
		} else {
			struct sk_buff *skb;
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_COMPLETE)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}
	} while (wait);
}
#endif
static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
		/* This looks weird, but this can happen if TCP collapsing
		 * split a fat GRO packet, while we released socket lock
		 * in skb_splice_bits()
		 */
		sk_eat_skb(sk, skb, false);
	}
	return NULL;
}
/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/* If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq - 1, &offset);
			if (!skb)
				break;
			/* TCP coalescing might have appended data to the skb.
			 * Try to splice more frags
			 */
			if (offset + 1 != skb->len)
				continue;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, false);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, false);
		if (!desc->count)
			break;
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0) {
		tcp_recv_skb(sk, seq, &offset);
		tcp_cleanup_rbuf(sk, copied);
	}
	return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	bool copied_early = false;
	struct sk_buff *skb;
	u32 urg_hole = 0;

	if (unlikely(flags & MSG_ERRQUEUE))
		return ip_recv_error(sk, msg, len, addr_len);

	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
	    (sk->sk_state == TCP_ESTABLISHED))
		sk_busy_loop(sk, nonblock);

	lock_sock(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	if (unlikely(tp->repair)) {
		err = -EPERM;
		if (!(flags & MSG_PEEK))
			goto out;

		if (tp->repair_queue == TCP_SEND_QUEUE)
			goto recv_sndq;

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out;

		/* 'common' recv queue MSG_PEEK-ing */
	}

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    net_dma_find_channel()) {
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
				 flags))
				break;

			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			WARN(!(flags & MSG_PEEK),
			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
		}

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
				!(flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

#ifdef CONFIG_NET_DMA
		if (tp->ucopy.dma_chan) {
			if (tp->rcv_wnd == 0 &&
			    !skb_queue_empty(&sk->sk_async_wait_queue)) {
				tcp_service_net_dma(sk, true);
				tcp_cleanup_rbuf(sk, copied);
			} else
				dma_async_issue_pending(tp->ucopy.dma_chan);
		}
#endif
		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tcp_service_net_dma(sk, false);  /* Don't block */
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
					    current->comm,
					    task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						urg_hole++;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = net_dma_find_channel();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					pr_alert("%s: dma_cookie < 0\n",
						 __func__);

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}

				dma_async_issue_pending(tp->ucopy.dma_chan);

				if ((offset + used) == skb->len)
					copied_early = true;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = false;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = false;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	tcp_service_net_dma(sk, true);	/* Wait for queue to drain */
	tp->ucopy.dma_chan = NULL;

	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	release_sock(sk);
	return copied;

out:
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, msg, len, flags);
	goto out;

recv_sndq:
	err = tcp_peek_sndq(sk, msg, len);
	goto out;
}
EXPORT_SYMBOL(tcp_recvmsg);
void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);
/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	in closing.
 */
static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};
static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
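
/*
 * Worked example: closing an ESTABLISHED socket looks up
 * new_state[TCP_ESTABLISHED] == TCP_FIN_WAIT1 | TCP_ACTION_FIN, so we
 * move to FIN_WAIT1 and the nonzero TCP_ACTION_FIN return tells the
 * caller to transmit a FIN. The same call in FIN_WAIT1 is a no-op: the
 * state maps to itself and no action bit is set.
 */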
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */
void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *	Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
EXPORT_SYMBOL(tcp_shutdown);
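
/*
 * The user-space half-close this implements (a sketch):
 *
 *	shutdown(fd, SHUT_WR);	// send FIN, keep reading
 *
 * moves an ESTABLISHED socket to FIN_WAIT1 via tcp_close_state() while
 * the receive side stays open until the peer closes.
 */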
bool tcp_check_oom(struct sock *sk, int shift)
{
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(sk, shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans)
		net_info_ratelimited("too many orphaned sockets\n");
	if (out_of_socket_memory)
		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
	return too_many_orphans || out_of_socket_memory;
}
2138 void tcp_close(struct sock
*sk
, long timeout
)
2140 struct sk_buff
*skb
;
2141 int data_was_unread
= 0;
2145 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2147 if (sk
->sk_state
== TCP_LISTEN
) {
2148 tcp_set_state(sk
, TCP_CLOSE
);
2151 inet_csk_listen_stop(sk
);
2153 goto adjudge_to_death
;
2156 /* We need to flush the recv. buffs. We do this only on the
2157 * descriptor close, not protocol-sourced closes, because the
2158 * reader process may not have drained the data yet!
2160 while ((skb
= __skb_dequeue(&sk
->sk_receive_queue
)) != NULL
) {
2161 u32 len
= TCP_SKB_CB(skb
)->end_seq
- TCP_SKB_CB(skb
)->seq
-
2163 data_was_unread
+= len
;
2169 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2170 if (sk
->sk_state
== TCP_CLOSE
)
2171 goto adjudge_to_death
;
2173 /* As outlined in RFC 2525, section 2.17, we send a RST here because
2174 * data was lost. To witness the awful effects of the old behavior of
2175 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2176 * GET in an FTP client, suspend the process, wait for the client to
2177 * advertise a zero window, then kill -9 the FTP client, wheee...
2178 * Note: timeout is always zero in such a case.
2180 if (unlikely(tcp_sk(sk
)->repair
)) {
2181 sk
->sk_prot
->disconnect(sk
, 0);
2182 } else if (data_was_unread
) {
2183 /* Unread data was tossed, zap the connection. */
2184 NET_INC_STATS_USER(sock_net(sk
), LINUX_MIB_TCPABORTONCLOSE
);
2185 tcp_set_state(sk
, TCP_CLOSE
);
2186 tcp_send_active_reset(sk
, sk
->sk_allocation
);
2187 } else if (sock_flag(sk
, SOCK_LINGER
) && !sk
->sk_lingertime
) {
2188 /* Check zero linger _after_ checking for unread data. */
2189 sk
->sk_prot
->disconnect(sk
, 0);
2190 NET_INC_STATS_USER(sock_net(sk
), LINUX_MIB_TCPABORTONDATA
);
2191 } else if (tcp_close_state(sk
)) {
2192 /* We FIN if the application ate all the data before
2193 * zapping the connection.
2196 /* RED-PEN. Formally speaking, we have broken TCP state
2197 * machine. State transitions:
2199 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2200 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2201 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2203 * are legal only when FIN has been sent (i.e. in window),
2204 * rather than queued out of window. Purists blame.
2206 * F.e. "RFC state" is ESTABLISHED,
2207 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2209 * The visible declinations are that sometimes
2210 * we enter time-wait state, when it is not required really
2211 * (harmless), do not send active resets, when they are
2212 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2213 * they look as CLOSING or LAST_ACK for Linux)
2214 * Probably, I missed some more holelets.
2216 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2217 * in a single packet! (May consider it later but will
2218 * probably need API support or TCP_CORK SYN-ACK until
2219 * data is written and socket is closed.)
2224 sk_stream_wait_close(sk
, timeout
);
2227 state
= sk
->sk_state
;
2231 /* It is the last release_sock in its life. It will remove backlog. */
2235 /* Now socket is owned by kernel and we acquire BH lock
2236 to finish close. No need to check for user refs.
2240 WARN_ON(sock_owned_by_user(sk
));
2242 percpu_counter_inc(sk
->sk_prot
->orphan_count
);
2244 /* Have we already been destroyed by a softirq or backlog? */
2245 if (state
!= TCP_CLOSE
&& sk
->sk_state
== TCP_CLOSE
)
2248 /* This is a (useful) BSD violating of the RFC. There is a
2249 * problem with TCP as specified in that the other end could
2250 * keep a socket open forever with no application left this end.
2251 * We use a 1 minute timeout (about the same as BSD) then kill
2252 * our end. If they send after that then tough - BUT: long enough
2253 * that we won't make the old 4*rto = almost no time - whoops
2256 * Nope, it was not mistake. It is really desired behaviour
2257 * f.e. on http servers, when such sockets are useless, but
2258 * consume significant resources. Let's do it with special
2259 * linger2 option. --ANK
2262 if (sk
->sk_state
== TCP_FIN_WAIT2
) {
2263 struct tcp_sock
*tp
= tcp_sk(sk
);
2264 if (tp
->linger2
< 0) {
2265 tcp_set_state(sk
, TCP_CLOSE
);
2266 tcp_send_active_reset(sk
, GFP_ATOMIC
);
2267 NET_INC_STATS_BH(sock_net(sk
),
2268 LINUX_MIB_TCPABORTONLINGER
);
2270 const int tmo
= tcp_fin_time(sk
);
2272 if (tmo
> TCP_TIMEWAIT_LEN
) {
2273 inet_csk_reset_keepalive_timer(sk
,
2274 tmo
- TCP_TIMEWAIT_LEN
);
2276 tcp_time_wait(sk
, TCP_FIN_WAIT2
, tmo
);
2281 if (sk
->sk_state
!= TCP_CLOSE
) {
2283 if (tcp_check_oom(sk
, 0)) {
2284 tcp_set_state(sk
, TCP_CLOSE
);
2285 tcp_send_active_reset(sk
, GFP_ATOMIC
);
2286 NET_INC_STATS_BH(sock_net(sk
),
2287 LINUX_MIB_TCPABORTONMEMORY
);
	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		/* We could get here with a non-NULL req if the socket is
		 * aborted (e.g., closed with unread data) before 3WHS
		 * finishes.
		 */
		if (req != NULL)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
EXPORT_SYMBOL(tcp_close);
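
/* Illustrative userspace sketch (not part of this file): a zero-timeout
 * SO_LINGER makes close() take the disconnect path above, aborting with
 * RST instead of the normal FIN/TIME-WAIT sequence. The connected
 * socket fd is assumed.
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(fd);
 */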
/* These states need RST on ABORT according to RFC793 */

static inline bool tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}
int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);
	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (unlikely(tp->repair)) {
		sk->sk_err = ECONNABORTED;
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;
	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif
	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);
	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt_us = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_cnt = 0;
	tp->window_clamp = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);
	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
EXPORT_SYMBOL(tcp_disconnect);
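
/* Illustrative userspace sketch (not part of this file): tcp_disconnect()
 * is typically reached from user space by connect()ing to AF_UNSPEC,
 * which dissolves the association. fd is an assumed connected socket.
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));
 */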
void tcp_sock_destruct(struct sock *sk)
{
	inet_sock_destruct(sk);

	kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
}
static inline bool tcp_can_repair_sock(const struct sock *sk)
{
	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
}
static int tcp_repair_options_est(struct tcp_sock *tp,
		struct tcp_repair_opt __user *optbuf, unsigned int len)
{
	struct tcp_repair_opt opt;

	while (len >= sizeof(opt)) {
		if (copy_from_user(&opt, optbuf, sizeof(opt)))
			return -EFAULT;

		optbuf++;
		len -= sizeof(opt);

		switch (opt.opt_code) {
		case TCPOPT_MSS:
			tp->rx_opt.mss_clamp = opt.opt_val;
			break;
		case TCPOPT_WINDOW:
			{
				u16 snd_wscale = opt.opt_val & 0xFFFF;
				u16 rcv_wscale = opt.opt_val >> 16;

				if (snd_wscale > 14 || rcv_wscale > 14)
					return -EFBIG;

				tp->rx_opt.snd_wscale = snd_wscale;
				tp->rx_opt.rcv_wscale = rcv_wscale;
				tp->rx_opt.wscale_ok = 1;
			}
			break;
		case TCPOPT_SACK_PERM:
			if (opt.opt_val != 0)
				return -EINVAL;

			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
			if (sysctl_tcp_fack)
				tcp_enable_fack(tp);
			break;
		case TCPOPT_TIMESTAMP:
			if (opt.opt_val != 0)
				return -EINVAL;

			tp->rx_opt.tstamp_ok = 1;
			break;
		}
	}

	return 0;
}
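
/* Illustrative userspace sketch (not part of this file): a
 * checkpoint/restore tool replaying negotiated options through the
 * parser above. Note the TCPOPT_WINDOW encoding: snd_wscale in the low
 * 16 bits, rcv_wscale in the high 16 bits. fd is an assumed socket
 * already switched into repair mode (TCP_REPAIR set to 1).
 *
 *	struct tcp_repair_opt opts[2] = {
 *		{ TCPOPT_MSS, 1460 },
 *		{ TCPOPT_WINDOW, 7 | (7 << 16) },
 *	};
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR_OPTIONS, opts, sizeof(opts));
 */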
/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* These are data/string values, all the others are ints */
	switch (optname) {
	case TCP_CONGESTION: {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}
	default:
		/* fallthru */
		break;
	}
	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;
	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;
	case TCP_THIN_LINEAR_TIMEOUTS:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else
			tp->thin_lto = val;
		break;

	case TCP_THIN_DUPACK:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else {
			tp->thin_dupack = val;
			if (tp->thin_dupack)
				tcp_disable_early_retrans(tp);
		}
		break;
	case TCP_REPAIR:
		if (!tcp_can_repair_sock(sk))
			err = -EPERM;
		else if (val == 1) {
			tp->repair = 1;
			sk->sk_reuse = SK_FORCE_REUSE;
			tp->repair_queue = TCP_NO_QUEUE;
		} else if (val == 0) {
			tp->repair = 0;
			sk->sk_reuse = SK_NO_REUSE;
			tcp_send_window_probe(sk);
		} else
			err = -EINVAL;
		break;
	case TCP_REPAIR_QUEUE:
		if (!tp->repair)
			err = -EPERM;
		else if (val < TCP_QUEUES_NR)
			tp->repair_queue = val;
		else
			err = -EINVAL;
		break;
	case TCP_QUEUE_SEQ:
		if (sk->sk_state != TCP_CLOSE)
			err = -EPERM;
		else if (tp->repair_queue == TCP_SEND_QUEUE)
			tp->write_seq = val;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			tp->rcv_nxt = val;
		else
			err = -EINVAL;
		break;
	case TCP_REPAIR_OPTIONS:
		if (!tp->repair)
			err = -EINVAL;
		else if (sk->sk_state == TCP_ESTABLISHED)
			err = tcp_repair_options_est(tp,
					(struct tcp_repair_opt __user *)optval,
					optlen);
		else
			err = -EPERM;
		break;
	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		}
		break;
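
	/* Illustrative userspace sketch (not part of this file): the
	 * write()-headers-then-sendfile() pattern the comment above
	 * describes. fd, hdr, hdr_len, file_fd and file_len are assumed.
	 *
	 *	int on = 1, off = 0;
	 *	setsockopt(fd, SOL_TCP, TCP_CORK, &on, sizeof(on));
	 *	write(fd, hdr, hdr_len);
	 *	sendfile(fd, file_fd, NULL, file_len);
	 *	setsockopt(fd, SOL_TCP, TCP_CORK, &off, sizeof(off));
	 */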
	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				u32 elapsed = keepalive_time_elapsed(tp);
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;
	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		/* Translate value in seconds to number of retransmits */
		icsk->icsk_accept_queue.rskq_defer_accept =
			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
					TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;
	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;
#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->Key mappings from userspace */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif
	case TCP_USER_TIMEOUT:
		/* Cap the max timeout in ms TCP will retry/retrans
		 * before giving up and aborting (ETIMEDOUT) a connection.
		 */
		if (val < 0)
			err = -EINVAL;
		else
			icsk->icsk_user_timeout = msecs_to_jiffies(val);
		break;

	case TCP_FASTOPEN:
		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
		    TCPF_LISTEN)))
			err = fastopen_init_queue(sk, val);
		else
			err = -EINVAL;
		break;
	case TCP_TIMESTAMP:
		if (!tp->repair)
			err = -EPERM;
		else
			tp->tsoffset = val - tcp_time_stamp;
		break;
	case TCP_NOTSENT_LOWAT:
		tp->notsent_lowat = val;
		sk->sk_write_space(sk);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_setsockopt);
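
/* Illustrative userspace sketch (not part of this file): every integer
 * option handled above is set the same way; TCP_NODELAY shown, fd assumed.
 *
 *	int one = 1;
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
 *		perror("setsockopt");
 */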
#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(const struct sock *sk, struct tcp_info *info)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;
	if (tp->ecn_flags & TCP_ECN_SEEN)
		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
	if (tp->syn_data_acked)
		info->tcpi_options |= TCPI_OPT_SYN_DATA;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = tp->srtt_us >> 3;
	info->tcpi_rttvar = tp->mdev_us >> 2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;

	info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
					sk->sk_pacing_rate : ~0ULL;
	info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
					sk->sk_max_pacing_rate : ~0ULL;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
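
/* Illustrative userspace sketch (not part of this file): reading the
 * structure filled in above through getsockopt(); fd assumed. tcpi_rtt
 * is reported in microseconds.
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */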
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		if (tp->repair)
			val = tp->rx_opt.mss_clamp;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = keepalive_time_when(tp) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = keepalive_intvl_when(tp) / HZ;
		break;
	case TCP_KEEPCNT:
		val = keepalive_probes(tp);
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;

	case TCP_THIN_LINEAR_TIMEOUTS:
		val = tp->thin_lto;
		break;
	case TCP_THIN_DUPACK:
		val = tp->thin_dupack;
		break;

	case TCP_REPAIR:
		val = tp->repair;
		break;

	case TCP_REPAIR_QUEUE:
		if (tp->repair)
			val = tp->repair_queue;
		else
			return -EINVAL;
		break;

	case TCP_QUEUE_SEQ:
		if (tp->repair_queue == TCP_SEND_QUEUE)
			val = tp->write_seq;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			val = tp->rcv_nxt;
		else
			return -EINVAL;
		break;

	case TCP_USER_TIMEOUT:
		val = jiffies_to_msecs(icsk->icsk_user_timeout);
		break;

	case TCP_FASTOPEN:
		if (icsk->icsk_accept_queue.fastopenq != NULL)
			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
		else
			val = 0;
		break;

	case TCP_TIMESTAMP:
		val = tcp_time_stamp + tp->tsoffset;
		break;
	case TCP_NOTSENT_LOWAT:
		val = tp->notsent_lowat;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_getsockopt);
#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
static DEFINE_MUTEX(tcp_md5sig_mutex);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);

		if (p->md5_desc.tfm)
			crypto_free_hash(p->md5_desc.tfm);
	}
	free_percpu(pool);
}
static void __tcp_alloc_md5sig_pool(void)
{
	int cpu;
	struct tcp_md5sig_pool __percpu *pool;

	pool = alloc_percpu(struct tcp_md5sig_pool);
	if (!pool)
		return;

	for_each_possible_cpu(cpu) {
		struct crypto_hash *hash;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR_OR_NULL(hash))
			goto out_free;

		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
	}
	/* before setting tcp_md5sig_pool, we must commit all writes
	 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
	 */
	smp_wmb();
	tcp_md5sig_pool = pool;
	return;
out_free:
	__tcp_free_md5sig_pool(pool);
}
bool tcp_alloc_md5sig_pool(void)
{
	if (unlikely(!tcp_md5sig_pool)) {
		mutex_lock(&tcp_md5sig_mutex);

		if (!tcp_md5sig_pool)
			__tcp_alloc_md5sig_pool();

		mutex_unlock(&tcp_md5sig_mutex);
	}
	return tcp_md5sig_pool != NULL;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
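
/* The function above is classic double-checked locking: the unlocked
 * read of tcp_md5sig_pool is safe because the pointer is written once,
 * under the mutex, only after __tcp_alloc_md5sig_pool() has committed
 * the per-cpu contents to memory.
 */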
/**
 *	tcp_get_md5sig_pool - get md5sig_pool for this user
 *
 *	We use percpu structure, so if we succeed, we exit with preemption
 *	and BH disabled, to make sure another thread or softirq handling
 *	won't try to get same context.
 */
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	struct tcp_md5sig_pool __percpu *p;

	local_bh_disable();
	p = ACCESS_ONCE(tcp_md5sig_pool);
	if (p)
		return __this_cpu_ptr(p);

	local_bh_enable();
	return NULL;
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);
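
/* Callers pair this with tcp_put_md5sig_pool(), which re-enables BHs;
 * they must not sleep in between, since the returned per-cpu context is
 * only valid while preemption and BHs stay disabled.
 */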
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
			const struct tcphdr *th)
{
	struct scatterlist sg;
	struct tcphdr hdr;
	int err;

	/* We are not allowed to change tcphdr, make a local copy */
	memcpy(&hdr, th, sizeof(hdr));
	hdr.check = 0;

	/* options aren't included in the hash */
	sg_init_one(&sg, &hdr, sizeof(hdr));
	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
	return err;
}
EXPORT_SYMBOL(tcp_md5_hash_header);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  const struct sk_buff *skb, unsigned int header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct hash_desc *desc = &hp->md5_desc;
	unsigned int i;
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	if (crypto_hash_update(desc, &sg, head_data_len))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const struct skb_frag_struct *f = &shi->frags[i];
		unsigned int offset = f->page_offset;
		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);

		sg_set_page(&sg, page, skb_frag_size(f),
			    offset_in_page(offset));
		if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
			return 1;
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
	struct scatterlist sg;

	sg_init_one(&sg, key->key, key->keylen);
	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
}
EXPORT_SYMBOL(tcp_md5_hash_key);
#endif
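
/* Illustrative userspace sketch (not part of this file): installing a
 * key with the TCP_MD5SIG socket option, which is what ultimately feeds
 * the hashing helpers above. fd and peer (a struct sockaddr_in for the
 * remote end) are assumed.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */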
void tcp_done(struct sock *sk)
{
	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;

	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);
	if (req != NULL)
		reqsk_fastopen_remove(sk, req, false);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);
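
/* Illustrative usage (not part of this file): the established-hash size
 * can be pinned on the kernel command line, e.g.
 *
 *	thash_entries=131072
 *
 * otherwise tcp_init() below sizes it from available memory.
 */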
static void tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 8;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
}
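
/* Worked example (illustrative): with 4 GiB of buffer pages and 4 KiB
 * pages, nr_free_buffer_pages() is about 1048576, so limit = 131072
 * pages and sysctl_tcp_mem becomes { 98304, 131072, 196608 } pages:
 * memory pressure starts at ~512 MiB, the hard limit is ~768 MiB.
 */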
void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int max_rshare, max_wshare, cnt;
	unsigned int i;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	percpu_counter_init(&tcp_sockets_allocated, 0);
	percpu_counter_init(&tcp_orphan_count, 0);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	cnt = tcp_hashinfo.ehash_mask + 1;

	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	sysctl_tcp_max_orphans = cnt / 2;
	sysctl_max_syn_backlog = max(128, cnt / 256);

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_rshare);

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}