/*
 *	INET		An implementation of the TCP/IP protocol suite for the LINUX
 *			operating system.  INET is implemented using the BSD Socket
 *			interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties remove from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
/*
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
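/*
 * Throughout this file the current state is tested with a bitmask rather
 * than with chains of equality comparisons: "1 << sk->sk_state" turns the
 * state into a single bit which is matched against a TCPF_* mask in one
 * operation.  A minimal illustrative sketch of the idiom (the helper name
 * is hypothetical, not part of this file):
 *
 *	static inline int tcp_sk_can_send_data(const struct sock *sk)
 *	{
 *		// Data may only be queued in ESTABLISHED or CLOSE_WAIT.
 *		return (1 << sk->sk_state) &
 *		       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT);
 *	}
 */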
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);
/* The len and flags members are implied by the tss->len / tss->flags uses
 * further down in the splice path. */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};
/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh, not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target--;

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -= tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->flags   = TCPCB_FLAG_ACK;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;

	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
		    signal_pending(current))
			break;
	}
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned.  */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}
661 static ssize_t
do_tcp_sendpages(struct sock
*sk
, struct page
**pages
, int poffset
,
662 size_t psize
, int flags
)
664 struct tcp_sock
*tp
= tcp_sk(sk
);
665 int mss_now
, size_goal
;
668 long timeo
= sock_sndtimeo(sk
, flags
& MSG_DONTWAIT
);
670 /* Wait for a connection to finish. */
671 if ((1 << sk
->sk_state
) & ~(TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
))
672 if ((err
= sk_stream_wait_connect(sk
, &timeo
)) != 0)
675 clear_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
);
677 mss_now
= tcp_current_mss(sk
, !(flags
&MSG_OOB
));
678 size_goal
= tp
->xmit_size_goal
;
682 if (sk
->sk_err
|| (sk
->sk_shutdown
& SEND_SHUTDOWN
))
686 struct sk_buff
*skb
= tcp_write_queue_tail(sk
);
687 struct page
*page
= pages
[poffset
/ PAGE_SIZE
];
688 int copy
, i
, can_coalesce
;
689 int offset
= poffset
% PAGE_SIZE
;
690 int size
= min_t(size_t, psize
, PAGE_SIZE
- offset
);
692 if (!tcp_send_head(sk
) || (copy
= size_goal
- skb
->len
) <= 0) {
694 if (!sk_stream_memory_free(sk
))
695 goto wait_for_sndbuf
;
697 skb
= sk_stream_alloc_skb(sk
, 0, sk
->sk_allocation
);
699 goto wait_for_memory
;
708 i
= skb_shinfo(skb
)->nr_frags
;
709 can_coalesce
= skb_can_coalesce(skb
, i
, page
, offset
);
710 if (!can_coalesce
&& i
>= MAX_SKB_FRAGS
) {
711 tcp_mark_push(tp
, skb
);
714 if (!sk_wmem_schedule(sk
, copy
))
715 goto wait_for_memory
;
718 skb_shinfo(skb
)->frags
[i
- 1].size
+= copy
;
721 skb_fill_page_desc(skb
, i
, page
, offset
, copy
);
725 skb
->data_len
+= copy
;
726 skb
->truesize
+= copy
;
727 sk
->sk_wmem_queued
+= copy
;
728 sk_mem_charge(sk
, copy
);
729 skb
->ip_summed
= CHECKSUM_PARTIAL
;
730 tp
->write_seq
+= copy
;
731 TCP_SKB_CB(skb
)->end_seq
+= copy
;
732 skb_shinfo(skb
)->gso_segs
= 0;
735 TCP_SKB_CB(skb
)->flags
&= ~TCPCB_FLAG_PSH
;
739 if (!(psize
-= copy
))
742 if (skb
->len
< size_goal
|| (flags
& MSG_OOB
))
745 if (forced_push(tp
)) {
746 tcp_mark_push(tp
, skb
);
747 __tcp_push_pending_frames(sk
, mss_now
, TCP_NAGLE_PUSH
);
748 } else if (skb
== tcp_send_head(sk
))
749 tcp_push_one(sk
, mss_now
);
753 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
756 tcp_push(sk
, flags
& ~MSG_MORE
, mss_now
, TCP_NAGLE_PUSH
);
758 if ((err
= sk_stream_wait_memory(sk
, &timeo
)) != 0)
761 mss_now
= tcp_current_mss(sk
, !(flags
&MSG_OOB
));
762 size_goal
= tp
->xmit_size_goal
;
767 tcp_push(sk
, flags
, mss_now
, tp
->nonagle
);
774 return sk_stream_error(sk
, flags
, err
);
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	release_sock(sk);
	return res;
}
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

		if (tmp >= pgbreak &&
		    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
			tmp = pgbreak;
	}

	return tmp;
}
818 int tcp_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
,
821 struct sock
*sk
= sock
->sk
;
823 struct tcp_sock
*tp
= tcp_sk(sk
);
826 int mss_now
, size_goal
;
833 flags
= msg
->msg_flags
;
834 timeo
= sock_sndtimeo(sk
, flags
& MSG_DONTWAIT
);
836 /* Wait for a connection to finish. */
837 if ((1 << sk
->sk_state
) & ~(TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
))
838 if ((err
= sk_stream_wait_connect(sk
, &timeo
)) != 0)
841 /* This should be in poll */
842 clear_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
);
844 mss_now
= tcp_current_mss(sk
, !(flags
&MSG_OOB
));
845 size_goal
= tp
->xmit_size_goal
;
847 /* Ok commence sending. */
848 iovlen
= msg
->msg_iovlen
;
853 if (sk
->sk_err
|| (sk
->sk_shutdown
& SEND_SHUTDOWN
))
856 while (--iovlen
>= 0) {
857 int seglen
= iov
->iov_len
;
858 unsigned char __user
*from
= iov
->iov_base
;
865 skb
= tcp_write_queue_tail(sk
);
867 if (!tcp_send_head(sk
) ||
868 (copy
= size_goal
- skb
->len
) <= 0) {
871 /* Allocate new segment. If the interface is SG,
872 * allocate skb fitting to single page.
874 if (!sk_stream_memory_free(sk
))
875 goto wait_for_sndbuf
;
877 skb
= sk_stream_alloc_skb(sk
, select_size(sk
),
880 goto wait_for_memory
;
883 * Check whether we can use HW checksum.
885 if (sk
->sk_route_caps
& NETIF_F_ALL_CSUM
)
886 skb
->ip_summed
= CHECKSUM_PARTIAL
;
892 /* Try to append data to the end of skb. */
896 /* Where to copy to? */
897 if (skb_tailroom(skb
) > 0) {
898 /* We have some space in skb head. Superb! */
899 if (copy
> skb_tailroom(skb
))
900 copy
= skb_tailroom(skb
);
901 if ((err
= skb_add_data(skb
, from
, copy
)) != 0)
905 int i
= skb_shinfo(skb
)->nr_frags
;
906 struct page
*page
= TCP_PAGE(sk
);
907 int off
= TCP_OFF(sk
);
909 if (skb_can_coalesce(skb
, i
, page
, off
) &&
911 /* We can extend the last page
914 } else if (i
== MAX_SKB_FRAGS
||
916 !(sk
->sk_route_caps
& NETIF_F_SG
))) {
917 /* Need to add new fragment and cannot
918 * do this because interface is non-SG,
919 * or because all the page slots are
921 tcp_mark_push(tp
, skb
);
924 if (off
== PAGE_SIZE
) {
926 TCP_PAGE(sk
) = page
= NULL
;
932 if (copy
> PAGE_SIZE
- off
)
933 copy
= PAGE_SIZE
- off
;
935 if (!sk_wmem_schedule(sk
, copy
))
936 goto wait_for_memory
;
939 /* Allocate new cache page. */
940 if (!(page
= sk_stream_alloc_page(sk
)))
941 goto wait_for_memory
;
944 /* Time to copy data. We are close to
946 err
= skb_copy_to_page(sk
, from
, skb
, page
,
949 /* If this page was new, give it to the
950 * socket so it does not get leaked.
959 /* Update the skb. */
961 skb_shinfo(skb
)->frags
[i
- 1].size
+=
964 skb_fill_page_desc(skb
, i
, page
, off
, copy
);
967 } else if (off
+ copy
< PAGE_SIZE
) {
973 TCP_OFF(sk
) = off
+ copy
;
977 TCP_SKB_CB(skb
)->flags
&= ~TCPCB_FLAG_PSH
;
979 tp
->write_seq
+= copy
;
980 TCP_SKB_CB(skb
)->end_seq
+= copy
;
981 skb_shinfo(skb
)->gso_segs
= 0;
985 if ((seglen
-= copy
) == 0 && iovlen
== 0)
988 if (skb
->len
< size_goal
|| (flags
& MSG_OOB
))
991 if (forced_push(tp
)) {
992 tcp_mark_push(tp
, skb
);
993 __tcp_push_pending_frames(sk
, mss_now
, TCP_NAGLE_PUSH
);
994 } else if (skb
== tcp_send_head(sk
))
995 tcp_push_one(sk
, mss_now
);
999 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
1002 tcp_push(sk
, flags
& ~MSG_MORE
, mss_now
, TCP_NAGLE_PUSH
);
1004 if ((err
= sk_stream_wait_memory(sk
, &timeo
)) != 0)
1007 mss_now
= tcp_current_mss(sk
, !(flags
&MSG_OOB
));
1008 size_goal
= tp
->xmit_size_goal
;
1014 tcp_push(sk
, flags
, mss_now
, tp
->nonagle
);
1015 TCP_CHECK_TIMER(sk
);
1021 tcp_unlink_write_queue(skb
, sk
);
1022 /* It is the one place in all of TCP, except connection
1023 * reset, where we can be unlinking the send_head.
1025 tcp_check_send_head(sk
, skb
);
1026 sk_wmem_free_skb(sk
, skb
);
1033 err
= sk_stream_error(sk
, flags
, err
);
1034 TCP_CHECK_TIMER(sk
);
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
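/*
 * The user-space counterpart: urgent data is produced with send(..., MSG_OOB)
 * and, unless SO_OOBINLINE is set, consumed with recv(..., MSG_OOB), which is
 * serviced by tcp_recv_urg() above. A small illustrative sketch (userspace C,
 * assuming a connected socket "fd"; handle_urgent() is a placeholder):
 *
 *	#include <sys/socket.h>
 *	#include <errno.h>
 *
 *	// Sender side: mark the last byte of this write as urgent.
 *	send(fd, "interrupt!", 10, MSG_OOB);
 *
 *	// Receiver side: fetch the single out-of-band byte without blocking.
 *	char oob;
 *	ssize_t n = recv(fd, &oob, 1, MSG_OOB);
 *	if (n == 1)
 *		handle_urgent(oob);
 *	else if (n < 0 && errno == EAGAIN)
 *		;	// no urgent data pending, matching the -EAGAIN above
 */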
1090 /* Clean up the receive buffer for full frames taken by the user,
1091 * then send an ACK if necessary. COPIED is the number of bytes
1092 * tcp_recvmsg has given to the user so far, it speeds up the
1093 * calculation of whether or not we must ACK for the sake of
1096 void tcp_cleanup_rbuf(struct sock
*sk
, int copied
)
1098 struct tcp_sock
*tp
= tcp_sk(sk
);
1099 int time_to_ack
= 0;
1102 struct sk_buff
*skb
= skb_peek(&sk
->sk_receive_queue
);
1104 WARN_ON(skb
&& !before(tp
->copied_seq
, TCP_SKB_CB(skb
)->end_seq
));
1107 if (inet_csk_ack_scheduled(sk
)) {
1108 const struct inet_connection_sock
*icsk
= inet_csk(sk
);
1109 /* Delayed ACKs frequently hit locked sockets during bulk
1111 if (icsk
->icsk_ack
.blocked
||
1112 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1113 tp
->rcv_nxt
- tp
->rcv_wup
> icsk
->icsk_ack
.rcv_mss
||
1115 * If this read emptied read buffer, we send ACK, if
1116 * connection is not bidirectional, user drained
1117 * receive buffer and there was a small segment
1121 ((icsk
->icsk_ack
.pending
& ICSK_ACK_PUSHED2
) ||
1122 ((icsk
->icsk_ack
.pending
& ICSK_ACK_PUSHED
) &&
1123 !icsk
->icsk_ack
.pingpong
)) &&
1124 !atomic_read(&sk
->sk_rmem_alloc
)))
1128 /* We send an ACK if we can now advertise a non-zero window
1129 * which has been raised "significantly".
1131 * Even if window raised up to infinity, do not send window open ACK
1132 * in states, where we will not receive more. It is useless.
1134 if (copied
> 0 && !time_to_ack
&& !(sk
->sk_shutdown
& RCV_SHUTDOWN
)) {
1135 __u32 rcv_window_now
= tcp_receive_window(tp
);
1137 /* Optimize, __tcp_select_window() is not cheap. */
1138 if (2*rcv_window_now
<= tp
->window_clamp
) {
1139 __u32 new_window
= __tcp_select_window(sk
);
1141 /* Send ACK now, if this read freed lots of space
1142 * in our buffer. Certainly, new_window is new window.
1143 * We can advertise it now, if it is not less than current one.
1144 * "Lots" means "at least twice" here.
1146 if (new_window
&& new_window
>= 2 * rcv_window_now
)
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}
/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq-1, &offset);
			if (!skb || (offset+1 != skb->len))
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */
1272 int tcp_recvmsg(struct kiocb
*iocb
, struct sock
*sk
, struct msghdr
*msg
,
1273 size_t len
, int nonblock
, int flags
, int *addr_len
)
1275 struct tcp_sock
*tp
= tcp_sk(sk
);
1281 int target
; /* Read at least this many bytes */
1283 struct task_struct
*user_recv
= NULL
;
1284 int copied_early
= 0;
1285 struct sk_buff
*skb
;
1289 TCP_CHECK_TIMER(sk
);
1292 if (sk
->sk_state
== TCP_LISTEN
)
1295 timeo
= sock_rcvtimeo(sk
, nonblock
);
1297 /* Urgent data needs to be handled specially. */
1298 if (flags
& MSG_OOB
)
1301 seq
= &tp
->copied_seq
;
1302 if (flags
& MSG_PEEK
) {
1303 peek_seq
= tp
->copied_seq
;
1307 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, len
);
1309 #ifdef CONFIG_NET_DMA
1310 tp
->ucopy
.dma_chan
= NULL
;
1312 skb
= skb_peek_tail(&sk
->sk_receive_queue
);
1317 available
= TCP_SKB_CB(skb
)->seq
+ skb
->len
- (*seq
);
1318 if ((available
< target
) &&
1319 (len
> sysctl_tcp_dma_copybreak
) && !(flags
& MSG_PEEK
) &&
1320 !sysctl_tcp_low_latency
&&
1321 __get_cpu_var(softnet_data
).net_dma
) {
1322 preempt_enable_no_resched();
1323 tp
->ucopy
.pinned_list
=
1324 dma_pin_iovec_pages(msg
->msg_iov
, len
);
1326 preempt_enable_no_resched();
1334 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1335 if (tp
->urg_data
&& tp
->urg_seq
== *seq
) {
1338 if (signal_pending(current
)) {
1339 copied
= timeo
? sock_intr_errno(timeo
) : -EAGAIN
;
1344 /* Next get a buffer. */
1346 skb
= skb_peek(&sk
->sk_receive_queue
);
1351 /* Now that we have two receive queues this
1354 if (before(*seq
, TCP_SKB_CB(skb
)->seq
)) {
1355 printk(KERN_INFO
"recvmsg bug: copied %X "
1356 "seq %X\n", *seq
, TCP_SKB_CB(skb
)->seq
);
1359 offset
= *seq
- TCP_SKB_CB(skb
)->seq
;
1360 if (tcp_hdr(skb
)->syn
)
1362 if (offset
< skb
->len
)
1364 if (tcp_hdr(skb
)->fin
)
1366 WARN_ON(!(flags
& MSG_PEEK
));
1368 } while (skb
!= (struct sk_buff
*)&sk
->sk_receive_queue
);
1370 /* Well, if we have backlog, try to process it now yet. */
1372 if (copied
>= target
&& !sk
->sk_backlog
.tail
)
1377 sk
->sk_state
== TCP_CLOSE
||
1378 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
1380 signal_pending(current
))
1383 if (sock_flag(sk
, SOCK_DONE
))
1387 copied
= sock_error(sk
);
1391 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
1394 if (sk
->sk_state
== TCP_CLOSE
) {
1395 if (!sock_flag(sk
, SOCK_DONE
)) {
1396 /* This occurs when user tries to read
1397 * from never connected socket.
1410 if (signal_pending(current
)) {
1411 copied
= sock_intr_errno(timeo
);
1416 tcp_cleanup_rbuf(sk
, copied
);
1418 if (!sysctl_tcp_low_latency
&& tp
->ucopy
.task
== user_recv
) {
1419 /* Install new reader */
1420 if (!user_recv
&& !(flags
& (MSG_TRUNC
| MSG_PEEK
))) {
1421 user_recv
= current
;
1422 tp
->ucopy
.task
= user_recv
;
1423 tp
->ucopy
.iov
= msg
->msg_iov
;
1426 tp
->ucopy
.len
= len
;
1428 WARN_ON(tp
->copied_seq
!= tp
->rcv_nxt
&&
1429 !(flags
& (MSG_PEEK
| MSG_TRUNC
)));
1431 /* Ugly... If prequeue is not empty, we have to
1432 * process it before releasing socket, otherwise
1433 * order will be broken at second iteration.
1434 * More elegant solution is required!!!
1436 * Look: we have the following (pseudo)queues:
1438 * 1. packets in flight
1443 * Each queue can be processed only if the next ones
1444 * are empty. At this point we have empty receive_queue.
1445 * But prequeue _can_ be not empty after 2nd iteration,
1446 * when we jumped to start of loop because backlog
1447 * processing added something to receive_queue.
1448 * We cannot release_sock(), because backlog contains
1449 * packets arrived _after_ prequeued ones.
1451 * Shortly, algorithm is clear --- to process all
1452 * the queues in order. We could make it more directly,
1453 * requeueing packets from backlog to prequeue, if
1454 * is not empty. It is more elegant, but eats cycles,
1457 if (!skb_queue_empty(&tp
->ucopy
.prequeue
))
1460 /* __ Set realtime policy in scheduler __ */
1463 if (copied
>= target
) {
1464 /* Do not sleep, just process backlog. */
1468 sk_wait_data(sk
, &timeo
);
1470 #ifdef CONFIG_NET_DMA
1471 tp
->ucopy
.wakeup
= 0;
1477 /* __ Restore normal policy in scheduler __ */
1479 if ((chunk
= len
- tp
->ucopy
.len
) != 0) {
1480 NET_ADD_STATS_USER(sock_net(sk
), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG
, chunk
);
1485 if (tp
->rcv_nxt
== tp
->copied_seq
&&
1486 !skb_queue_empty(&tp
->ucopy
.prequeue
)) {
1488 tcp_prequeue_process(sk
);
1490 if ((chunk
= len
- tp
->ucopy
.len
) != 0) {
1491 NET_ADD_STATS_USER(sock_net(sk
), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE
, chunk
);
1497 if ((flags
& MSG_PEEK
) && peek_seq
!= tp
->copied_seq
) {
1498 if (net_ratelimit())
1499 printk(KERN_DEBUG
"TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1500 current
->comm
, task_pid_nr(current
));
1501 peek_seq
= tp
->copied_seq
;
1506 /* Ok so how much can we use? */
1507 used
= skb
->len
- offset
;
1511 /* Do we have urgent data here? */
1513 u32 urg_offset
= tp
->urg_seq
- *seq
;
1514 if (urg_offset
< used
) {
1516 if (!sock_flag(sk
, SOCK_URGINLINE
)) {
1528 if (!(flags
& MSG_TRUNC
)) {
1529 #ifdef CONFIG_NET_DMA
1530 if (!tp
->ucopy
.dma_chan
&& tp
->ucopy
.pinned_list
)
1531 tp
->ucopy
.dma_chan
= get_softnet_dma();
1533 if (tp
->ucopy
.dma_chan
) {
1534 tp
->ucopy
.dma_cookie
= dma_skb_copy_datagram_iovec(
1535 tp
->ucopy
.dma_chan
, skb
, offset
,
1537 tp
->ucopy
.pinned_list
);
1539 if (tp
->ucopy
.dma_cookie
< 0) {
1541 printk(KERN_ALERT
"dma_cookie < 0\n");
1543 /* Exception. Bailout! */
1548 if ((offset
+ used
) == skb
->len
)
1554 err
= skb_copy_datagram_iovec(skb
, offset
,
1555 msg
->msg_iov
, used
);
1557 /* Exception. Bailout! */
1569 tcp_rcv_space_adjust(sk
);
1572 if (tp
->urg_data
&& after(tp
->copied_seq
, tp
->urg_seq
)) {
1574 tcp_fast_path_check(sk
);
1576 if (used
+ offset
< skb
->len
)
1579 if (tcp_hdr(skb
)->fin
)
1581 if (!(flags
& MSG_PEEK
)) {
1582 sk_eat_skb(sk
, skb
, copied_early
);
1588 /* Process the FIN. */
1590 if (!(flags
& MSG_PEEK
)) {
1591 sk_eat_skb(sk
, skb
, copied_early
);
1598 if (!skb_queue_empty(&tp
->ucopy
.prequeue
)) {
1601 tp
->ucopy
.len
= copied
> 0 ? len
: 0;
1603 tcp_prequeue_process(sk
);
1605 if (copied
> 0 && (chunk
= len
- tp
->ucopy
.len
) != 0) {
1606 NET_ADD_STATS_USER(sock_net(sk
), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE
, chunk
);
1612 tp
->ucopy
.task
= NULL
;
1616 #ifdef CONFIG_NET_DMA
1617 if (tp
->ucopy
.dma_chan
) {
1618 dma_cookie_t done
, used
;
1620 dma_async_memcpy_issue_pending(tp
->ucopy
.dma_chan
);
1622 while (dma_async_memcpy_complete(tp
->ucopy
.dma_chan
,
1623 tp
->ucopy
.dma_cookie
, &done
,
1624 &used
) == DMA_IN_PROGRESS
) {
1625 /* do partial cleanup of sk_async_wait_queue */
1626 while ((skb
= skb_peek(&sk
->sk_async_wait_queue
)) &&
1627 (dma_async_is_complete(skb
->dma_cookie
, done
,
1628 used
) == DMA_SUCCESS
)) {
1629 __skb_dequeue(&sk
->sk_async_wait_queue
);
1634 /* Safe to free early-copied skbs now */
1635 __skb_queue_purge(&sk
->sk_async_wait_queue
);
1636 dma_chan_put(tp
->ucopy
.dma_chan
);
1637 tp
->ucopy
.dma_chan
= NULL
;
1639 if (tp
->ucopy
.pinned_list
) {
1640 dma_unpin_iovec_pages(tp
->ucopy
.pinned_list
);
1641 tp
->ucopy
.pinned_list
= NULL
;
1645 /* According to UNIX98, msg_name/msg_namelen are ignored
1646 * on connected socket. I was just happy when found this 8) --ANK
1649 /* Clean up data we have read: This will do ACK frames. */
1650 tcp_cleanup_rbuf(sk
, copied
);
1652 TCP_CHECK_TIMER(sk
);
1657 TCP_CHECK_TIMER(sk
);
1662 err
= tcp_recv_urg(sk
, timeo
, msg
, len
, flags
, addr_len
);
void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}

EXPORT_SYMBOL_GPL(tcp_set_state);
/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	in CLOSING.
 */

static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
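/*
 * Each new_state[] entry packs the successor state in the low bits
 * (TCP_STATE_MASK) and an "emit a FIN" flag (TCP_ACTION_FIN) in a high bit,
 * which is exactly how tcp_close_state() unpacks it. For example, closing a
 * socket that is currently ESTABLISHED:
 *
 *	next = new_state[TCP_ESTABLISHED];	// TCP_FIN_WAIT1 | TCP_ACTION_FIN
 *	ns   = next & TCP_STATE_MASK;		// TCP_FIN_WAIT1
 *	fin  = next & TCP_ACTION_FIN;		// non-zero: a FIN must be sent
 *
 * whereas new_state[TCP_FIN_WAIT1] is just TCP_FIN_WAIT1: a second close()
 * changes nothing and no additional FIN is emitted.
 */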
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
*sk
, long timeout
)
1760 struct sk_buff
*skb
;
1761 int data_was_unread
= 0;
1765 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1767 if (sk
->sk_state
== TCP_LISTEN
) {
1768 tcp_set_state(sk
, TCP_CLOSE
);
1771 inet_csk_listen_stop(sk
);
1773 goto adjudge_to_death
;
1776 /* We need to flush the recv. buffs. We do this only on the
1777 * descriptor close, not protocol-sourced closes, because the
1778 * reader process may not have drained the data yet!
1780 while ((skb
= __skb_dequeue(&sk
->sk_receive_queue
)) != NULL
) {
1781 u32 len
= TCP_SKB_CB(skb
)->end_seq
- TCP_SKB_CB(skb
)->seq
-
1783 data_was_unread
+= len
;
1789 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1790 * data was lost. To witness the awful effects of the old behavior of
1791 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1792 * GET in an FTP client, suspend the process, wait for the client to
1793 * advertise a zero window, then kill -9 the FTP client, wheee...
1794 * Note: timeout is always zero in such a case.
1796 if (data_was_unread
) {
1797 /* Unread data was tossed, zap the connection. */
1798 NET_INC_STATS_USER(sock_net(sk
), LINUX_MIB_TCPABORTONCLOSE
);
1799 tcp_set_state(sk
, TCP_CLOSE
);
1800 tcp_send_active_reset(sk
, GFP_KERNEL
);
1801 } else if (sock_flag(sk
, SOCK_LINGER
) && !sk
->sk_lingertime
) {
1802 /* Check zero linger _after_ checking for unread data. */
1803 sk
->sk_prot
->disconnect(sk
, 0);
1804 NET_INC_STATS_USER(sock_net(sk
), LINUX_MIB_TCPABORTONDATA
);
1805 } else if (tcp_close_state(sk
)) {
1806 /* We FIN if the application ate all the data before
1807 * zapping the connection.
1810 /* RED-PEN. Formally speaking, we have broken TCP state
1811 * machine. State transitions:
1813 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1814 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1815 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1817 * are legal only when FIN has been sent (i.e. in window),
1818 * rather than queued out of window. Purists blame.
1820 * F.e. "RFC state" is ESTABLISHED,
1821 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1823 * The visible declinations are that sometimes
1824 * we enter time-wait state, when it is not required really
1825 * (harmless), do not send active resets, when they are
1826 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1827 * they look as CLOSING or LAST_ACK for Linux)
1828 * Probably, I missed some more holelets.
1834 sk_stream_wait_close(sk
, timeout
);
1837 state
= sk
->sk_state
;
1840 atomic_inc(sk
->sk_prot
->orphan_count
);
1842 /* It is the last release_sock in its life. It will remove backlog. */
1846 /* Now socket is owned by kernel and we acquire BH lock
1847 to finish close. No need to check for user refs.
1851 WARN_ON(sock_owned_by_user(sk
));
1853 /* Have we already been destroyed by a softirq or backlog? */
1854 if (state
!= TCP_CLOSE
&& sk
->sk_state
== TCP_CLOSE
)
1857 /* This is a (useful) BSD violating of the RFC. There is a
1858 * problem with TCP as specified in that the other end could
1859 * keep a socket open forever with no application left this end.
1860 * We use a 3 minute timeout (about the same as BSD) then kill
1861 * our end. If they send after that then tough - BUT: long enough
1862 * that we won't make the old 4*rto = almost no time - whoops
1865 * Nope, it was not mistake. It is really desired behaviour
1866 * f.e. on http servers, when such sockets are useless, but
1867 * consume significant resources. Let's do it with special
1868 * linger2 option. --ANK
1871 if (sk
->sk_state
== TCP_FIN_WAIT2
) {
1872 struct tcp_sock
*tp
= tcp_sk(sk
);
1873 if (tp
->linger2
< 0) {
1874 tcp_set_state(sk
, TCP_CLOSE
);
1875 tcp_send_active_reset(sk
, GFP_ATOMIC
);
1876 NET_INC_STATS_BH(sock_net(sk
),
1877 LINUX_MIB_TCPABORTONLINGER
);
1879 const int tmo
= tcp_fin_time(sk
);
1881 if (tmo
> TCP_TIMEWAIT_LEN
) {
1882 inet_csk_reset_keepalive_timer(sk
,
1883 tmo
- TCP_TIMEWAIT_LEN
);
1885 tcp_time_wait(sk
, TCP_FIN_WAIT2
, tmo
);
1890 if (sk
->sk_state
!= TCP_CLOSE
) {
1892 if (tcp_too_many_orphans(sk
,
1893 atomic_read(sk
->sk_prot
->orphan_count
))) {
1894 if (net_ratelimit())
1895 printk(KERN_INFO
"TCP: too many of orphaned "
1897 tcp_set_state(sk
, TCP_CLOSE
);
1898 tcp_send_active_reset(sk
, GFP_ATOMIC
);
1899 NET_INC_STATS_BH(sock_net(sk
),
1900 LINUX_MIB_TCPABORTONMEMORY
);
1904 if (sk
->sk_state
== TCP_CLOSE
)
1905 inet_csk_destroy_sock(sk
);
1906 /* Otherwise, socket is reprieved until protocol close. */
/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}
1923 int tcp_disconnect(struct sock
*sk
, int flags
)
1925 struct inet_sock
*inet
= inet_sk(sk
);
1926 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1927 struct tcp_sock
*tp
= tcp_sk(sk
);
1929 int old_state
= sk
->sk_state
;
1931 if (old_state
!= TCP_CLOSE
)
1932 tcp_set_state(sk
, TCP_CLOSE
);
1934 /* ABORT function of RFC793 */
1935 if (old_state
== TCP_LISTEN
) {
1936 inet_csk_listen_stop(sk
);
1937 } else if (tcp_need_reset(old_state
) ||
1938 (tp
->snd_nxt
!= tp
->write_seq
&&
1939 (1 << old_state
) & (TCPF_CLOSING
| TCPF_LAST_ACK
))) {
1940 /* The last check adjusts for discrepancy of Linux wrt. RFC
1943 tcp_send_active_reset(sk
, gfp_any());
1944 sk
->sk_err
= ECONNRESET
;
1945 } else if (old_state
== TCP_SYN_SENT
)
1946 sk
->sk_err
= ECONNRESET
;
1948 tcp_clear_xmit_timers(sk
);
1949 __skb_queue_purge(&sk
->sk_receive_queue
);
1950 tcp_write_queue_purge(sk
);
1951 __skb_queue_purge(&tp
->out_of_order_queue
);
1952 #ifdef CONFIG_NET_DMA
1953 __skb_queue_purge(&sk
->sk_async_wait_queue
);
1958 if (!(sk
->sk_userlocks
& SOCK_BINDADDR_LOCK
))
1959 inet_reset_saddr(sk
);
1961 sk
->sk_shutdown
= 0;
1962 sock_reset_flag(sk
, SOCK_DONE
);
1964 if ((tp
->write_seq
+= tp
->max_window
+ 2) == 0)
1966 icsk
->icsk_backoff
= 0;
1968 icsk
->icsk_probes_out
= 0;
1969 tp
->packets_out
= 0;
1970 tp
->snd_ssthresh
= 0x7fffffff;
1971 tp
->snd_cwnd_cnt
= 0;
1972 tp
->bytes_acked
= 0;
1973 tcp_set_ca_state(sk
, TCP_CA_Open
);
1974 tcp_clear_retrans(tp
);
1975 inet_csk_delack_init(sk
);
1976 tcp_init_send_head(sk
);
1977 memset(&tp
->rx_opt
, 0, sizeof(tp
->rx_opt
));
1980 WARN_ON(inet
->num
&& !icsk
->icsk_bind_hash
);
1982 sk
->sk_error_report(sk
);
1987 * Socket option code for TCP.
1989 static int do_tcp_setsockopt(struct sock
*sk
, int level
,
1990 int optname
, char __user
*optval
, int optlen
)
1992 struct tcp_sock
*tp
= tcp_sk(sk
);
1993 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1997 /* This is a string value all the others are int's */
1998 if (optname
== TCP_CONGESTION
) {
1999 char name
[TCP_CA_NAME_MAX
];
2004 val
= strncpy_from_user(name
, optval
,
2005 min(TCP_CA_NAME_MAX
-1, optlen
));
2011 err
= tcp_set_congestion_control(sk
, name
);
2016 if (optlen
< sizeof(int))
2019 if (get_user(val
, (int __user
*)optval
))
2026 /* Values greater than interface MTU won't take effect. However
2027 * at the point when this call is done we typically don't yet
2028 * know which interface is going to be used */
2029 if (val
< 8 || val
> MAX_TCP_WINDOW
) {
2033 tp
->rx_opt
.user_mss
= val
;
2038 /* TCP_NODELAY is weaker than TCP_CORK, so that
2039 * this option on corked socket is remembered, but
2040 * it is not activated until cork is cleared.
2042 * However, when TCP_NODELAY is set we make
2043 * an explicit push, which overrides even TCP_CORK
2044 * for currently queued segments.
2046 tp
->nonagle
|= TCP_NAGLE_OFF
|TCP_NAGLE_PUSH
;
2047 tcp_push_pending_frames(sk
);
2049 tp
->nonagle
&= ~TCP_NAGLE_OFF
;
2054 /* When set indicates to always queue non-full frames.
2055 * Later the user clears this option and we transmit
2056 * any pending partial frames in the queue. This is
2057 * meant to be used alongside sendfile() to get properly
2058 * filled frames when the user (for example) must write
2059 * out headers with a write() call first and then use
2060 * sendfile to send out the data parts.
2062 * TCP_CORK can be set together with TCP_NODELAY and it is
2063 * stronger than TCP_NODELAY.
2066 tp
->nonagle
|= TCP_NAGLE_CORK
;
2068 tp
->nonagle
&= ~TCP_NAGLE_CORK
;
2069 if (tp
->nonagle
&TCP_NAGLE_OFF
)
2070 tp
->nonagle
|= TCP_NAGLE_PUSH
;
2071 tcp_push_pending_frames(sk
);
2076 if (val
< 1 || val
> MAX_TCP_KEEPIDLE
)
2079 tp
->keepalive_time
= val
* HZ
;
2080 if (sock_flag(sk
, SOCK_KEEPOPEN
) &&
2081 !((1 << sk
->sk_state
) &
2082 (TCPF_CLOSE
| TCPF_LISTEN
))) {
2083 __u32 elapsed
= tcp_time_stamp
- tp
->rcv_tstamp
;
2084 if (tp
->keepalive_time
> elapsed
)
2085 elapsed
= tp
->keepalive_time
- elapsed
;
2088 inet_csk_reset_keepalive_timer(sk
, elapsed
);
2093 if (val
< 1 || val
> MAX_TCP_KEEPINTVL
)
2096 tp
->keepalive_intvl
= val
* HZ
;
2099 if (val
< 1 || val
> MAX_TCP_KEEPCNT
)
2102 tp
->keepalive_probes
= val
;
2105 if (val
< 1 || val
> MAX_TCP_SYNCNT
)
2108 icsk
->icsk_syn_retries
= val
;
2114 else if (val
> sysctl_tcp_fin_timeout
/ HZ
)
2117 tp
->linger2
= val
* HZ
;
2120 case TCP_DEFER_ACCEPT
:
2121 icsk
->icsk_accept_queue
.rskq_defer_accept
= 0;
2123 /* Translate value in seconds to number of
2125 while (icsk
->icsk_accept_queue
.rskq_defer_accept
< 32 &&
2126 val
> ((TCP_TIMEOUT_INIT
/ HZ
) <<
2127 icsk
->icsk_accept_queue
.rskq_defer_accept
))
2128 icsk
->icsk_accept_queue
.rskq_defer_accept
++;
2129 icsk
->icsk_accept_queue
.rskq_defer_accept
++;
2133 case TCP_WINDOW_CLAMP
:
2135 if (sk
->sk_state
!= TCP_CLOSE
) {
2139 tp
->window_clamp
= 0;
2141 tp
->window_clamp
= val
< SOCK_MIN_RCVBUF
/ 2 ?
2142 SOCK_MIN_RCVBUF
/ 2 : val
;
2147 icsk
->icsk_ack
.pingpong
= 1;
2149 icsk
->icsk_ack
.pingpong
= 0;
2150 if ((1 << sk
->sk_state
) &
2151 (TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
) &&
2152 inet_csk_ack_scheduled(sk
)) {
2153 icsk
->icsk_ack
.pending
|= ICSK_ACK_PUSHED
;
2154 tcp_cleanup_rbuf(sk
, 1);
2156 icsk
->icsk_ack
.pingpong
= 1;
2161 #ifdef CONFIG_TCP_MD5SIG
2163 /* Read the IP->Key mappings from userspace */
2164 err
= tp
->af_specific
->md5_parse(sk
, optval
, optlen
);
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
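/*
 * Typical user-space use of the TCP-level options handled above is corking a
 * response header together with a sendfile() payload and then flushing it.
 * A hedged sketch (userspace C, error handling omitted; "fd" is a connected
 * TCP socket, "file_fd", "header" and the lengths are assumed to exist):
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *	#include <sys/sendfile.h>
 *	#include <unistd.h>
 *
 *	int one = 1, zero = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &one, sizeof(one));
 *	write(fd, header, header_len);		// queued, full frames only
 *	sendfile(fd, file_fd, NULL, file_len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &zero, sizeof(zero));	// flush partial frame
 *
 *	// Latency-sensitive request/response traffic usually disables Nagle instead:
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 */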
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
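/*
 * The structure filled in above is what user space sees through
 * getsockopt(TCP_INFO). A minimal sketch of reading a few of the fields
 * (userspace C, assuming a connected socket "fd"):
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("state %u rtt %uus rttvar %uus cwnd %u retrans %u\n",
 *		       ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
 *		       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */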
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
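/*
 * For reference, a minimal user-space sketch of how these options are
 * reached (illustrative only; "fd" stands for any connected TCP socket
 * and error handling is omitted):
 *
 *	struct tcp_info ti;
 *	char ca[TCP_CA_NAME_MAX];
 *	socklen_t ti_len = sizeof(ti), ca_len = sizeof(ca);
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &ti_len);
 *	getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, &ca_len);
 *
 * Both calls land in do_tcp_getsockopt() above; any option level other
 * than SOL_TCP is forwarded to the address-family getsockopt instead.
 */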
struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int len;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;
		int mss;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		mss = skb_shinfo(skb)->gso_size;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += len;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
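/*
 * The checksum handling in tcp_tso_segment() is an incremental update
 * in the spirit of RFC 1624: oldlen holds the one's-complement
 * negation of the original TCP length (header plus payload), so
 * folding delta = htonl(oldlen + (thlen + len)) into th->check swaps
 * the old pseudo-header length contribution for the per-segment one
 * without re-summing any payload.  As an illustration, a 7000-byte
 * payload with gso_size 1448 is split into DIV_ROUND_UP(7000, 1448)
 * = 5 segments; the loop patches the first four full-sized segments
 * with the same delta, and the shorter final segment is fixed up
 * separately after the loop using its real tail length.
 */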
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
		}
	}
	free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);
static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
	int cpu;
	struct tcp_md5sig_pool **pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
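/*
 * tcp_md5sig_users is, broadly, a reference count on the per-cpu MD5
 * pool: it is raised when a socket configures an MD5 key (via
 * tcp_alloc_md5sig_pool()) and transiently by __tcp_get_md5sig_pool(),
 * and dropped again through tcp_free_md5sig_pool().  The pool and its
 * crypto transforms therefore exist only while at least one user
 * remains.
 */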
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **p;
	spin_lock_bh(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);

int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
			struct tcphdr *th)
{
	struct scatterlist sg;
	int err;

	__sum16 old_checksum = th->check;
	th->check = 0;
	/* options aren't included in the hash */
	sg_init_one(&sg, th, sizeof(struct tcphdr));
	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
	th->check = old_checksum;
	return err;
}

EXPORT_SYMBOL(tcp_md5_hash_header);

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  struct sk_buff *skb, unsigned header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct hash_desc *desc = &hp->md5_desc;
	unsigned i;
	const unsigned head_data_len = skb_headlen(skb) > header_len ?
				       skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	if (crypto_hash_update(desc, &sg, head_data_len))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const struct skb_frag_struct *f = &shi->frags[i];
		sg_set_page(&sg, f->page, f->size, f->page_offset);
		if (crypto_hash_update(desc, &sg, f->size))
			return 1;
	}

	return 0;
}

EXPORT_SYMBOL(tcp_md5_hash_skb_data);

int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
{
	struct scatterlist sg;

	sg_init_one(&sg, key->key, key->keylen);
	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
}

EXPORT_SYMBOL(tcp_md5_hash_key);
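/*
 * The helpers above are the generic pieces of the RFC 2385 TCP MD5
 * signature.  The address-family code (for example tcp_v4_md5_hash_skb()
 * in tcp_ipv4.c) composes them roughly as follows, error handling
 * omitted:
 *
 *	hp = tcp_get_md5sig_pool();
 *	crypto_hash_init(&hp->md5_desc);
 *	... hash the pseudo-header (addresses, protocol, length) ...
 *	tcp_md5_hash_header(hp, th);
 *	tcp_md5_hash_skb_data(hp, skb, th->doff << 2);
 *	tcp_md5_hash_key(hp, key);
 *	crypto_hash_final(&hp->md5_desc, md5_hash);
 *	tcp_put_md5sig_pool();
 */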
#endif /* CONFIG_TCP_MD5SIG */

void tcp_done(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long nr_pages, limit;
	int order, i, max_share;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	percpu_counter_init(&tcp_sockets_allocated, 0);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					thash_entries ? 0 : 512 * 1024);
	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
	}
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Set the pressure threshold to be a fraction of global memory that
	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
	 * memory, with a floor of 128 pages.
	 */
	nr_pages = totalram_pages - totalhigh_pages;
	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}
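/*
 * Worked example of the sizing above, assuming 4 KB pages
 * (PAGE_SHIFT == 12) and 1 GB of low memory: nr_pages = 262144, so
 * limit = min(262144, 65536) >> 8 = 256, then
 * limit = (256 * (262144 >> 8)) >> 1 = 131072 pages (512 MB), giving
 * sysctl_tcp_mem = { 98304, 131072, 196608 } pages.  The per-socket
 * figure becomes 131072 << 5 = 4 MB, exactly the 4 MB ceiling, so both
 * tcp_rmem[2] and tcp_wmem[2] end up at 4 MB on such a machine.
 */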
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_splice_read);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);