/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					get read.
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties remove from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now properly.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
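/*
 * Editor's illustration of the states above: a typical active close,
 * assuming the peer closes politely after we do, walks
 *
 *	ESTABLISHED --close()--> FIN_WAIT1 --ACK of our FIN--> FIN_WAIT2
 *	FIN_WAIT2 --peer's FIN (we ACK)--> TIME_WAIT --timeout--> CLOSE
 *
 * while a passive close goes ESTABLISHED -> CLOSE_WAIT on the peer's FIN,
 * CLOSE_WAIT -> LAST_ACK once we close(), and LAST_ACK -> CLOSE when our
 * own FIN is acknowledged.
 */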
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);

kmem_cache_t *tcp_timewait_cachep;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure;

EXPORT_SYMBOL(tcp_memory_pressure);
void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);
/*
 *	LISTEN is a special case for poll..
 */
static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
					       poll_table *wait)
{
	return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ?
		(POLLIN | POLLRDNORM) : 0;
}
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return tcp_listen_poll(sk, wait);

	/* Socket is not locked. We are protected from async events
	   by poll logic, and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
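/*
 * Editor's note: a minimal userspace sketch of how the mask computed above
 * is consumed; this is ordinary poll(2) usage, not part of the kernel:
 *
 *	struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)	... readable data queued ...
 *		if (pfd.revents & POLLOUT)	... wspace above low-water mark ...
 *		if (pfd.revents & POLLHUP)	... both directions shut down ...
 *		if (pfd.revents & POLLPRI)	... urgent data pending ...
 *	}
 */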
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	};

	return put_user(answ, (int __user *)arg);
}
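/*
 * Editor's note: userspace reaches the cases above through ioctl(2);
 * a hedged sketch (SIOCINQ is also known as FIONREAD):
 *
 *	int pending, at_mark, unsent;
 *	ioctl(tcp_fd, SIOCINQ, &pending);	readable bytes queued
 *	ioctl(tcp_fd, SIOCATMARK, &at_mark);	1 if read ptr is at urgent mark
 *	ioctl(tcp_fd, SIOCOUTQ, &unsent);	write_seq - snd_una
 */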
int tcp_listen_start(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	tcp_delack_init(tp);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&tp->accept_queue);
	return -EADDRINUSE;
}
/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
static void tcp_listen_stop (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	tcp_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(&tp->accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(!sock_owned_by_user(child));
		sock_hold(child);

		tcp_disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(&tcp_orphan_count);

		tcp_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	BUG_TRAP(!sk->sk_ack_backlog);
}
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
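/*
 * Editor's worked example: with a peer-advertised max_window of 64KB,
 * forced_push() triggers once more than 32KB (max_window >> 1) has been
 * written past the last PSH mark, so a bulk sender sets PSH at least
 * every half-window even if the application never flushes explicitly.
 */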
static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
			      struct sk_buff *skb)
{
	skb->csum = 0;
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);
	if (!sk->sk_send_head)
		sk->sk_send_head = skb;
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}
static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
			    int mss_now, int nonagle)
{
	if (sk->sk_send_head) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, tp, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, tp, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (sk->sk_forward_alloc < copy &&
		    !sk_stream_mem_schedule(sk, copy, 0))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_HW;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->tso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == sk->sk_send_head)
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
		return sock_no_sendpage(sock, page, offset, size, flags);

#undef TCP_ZC_CSUM_FLAGS

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
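/*
 * Editor's note: the usual userspace entry into this sendpage path is
 * sendfile(2); a hedged sketch, error handling elided:
 *
 *	off_t off = 0;
 *	while (count > 0) {
 *		ssize_t n = sendfile(tcp_fd, file_fd, &off, count);
 *		if (n <= 0)
 *			break;
 *		count -= n;
 *	}
 *
 * When the route lacks SG or checksum-offload capability, tcp_sendpage()
 * above quietly falls back to sock_no_sendpage(), an ordinary copying send.
 */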
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk, struct tcp_sock *tp)
{
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk->sk_route_caps & NETIF_F_TSO)
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}
*iocb
, struct sock
*sk
, struct msghdr
*msg
,
764 struct tcp_sock
*tp
= tcp_sk(sk
);
767 int mss_now
, size_goal
;
774 flags
= msg
->msg_flags
;
775 timeo
= sock_sndtimeo(sk
, flags
& MSG_DONTWAIT
);
777 /* Wait for a connection to finish. */
778 if ((1 << sk
->sk_state
) & ~(TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
))
779 if ((err
= sk_stream_wait_connect(sk
, &timeo
)) != 0)
782 /* This should be in poll */
783 clear_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
);
785 mss_now
= tcp_current_mss(sk
, !(flags
&MSG_OOB
));
786 size_goal
= tp
->xmit_size_goal
;
788 /* Ok commence sending. */
789 iovlen
= msg
->msg_iovlen
;
794 if (sk
->sk_err
|| (sk
->sk_shutdown
& SEND_SHUTDOWN
))
797 while (--iovlen
>= 0) {
798 int seglen
= iov
->iov_len
;
799 unsigned char __user
*from
= iov
->iov_base
;
806 skb
= sk
->sk_write_queue
.prev
;
808 if (!sk
->sk_send_head
||
809 (copy
= size_goal
- skb
->len
) <= 0) {
812 /* Allocate new segment. If the interface is SG,
813 * allocate skb fitting to single page.
815 if (!sk_stream_memory_free(sk
))
816 goto wait_for_sndbuf
;
818 skb
= sk_stream_alloc_pskb(sk
, select_size(sk
, tp
),
819 0, sk
->sk_allocation
);
821 goto wait_for_memory
;
824 * Check whether we can use HW checksum.
826 if (sk
->sk_route_caps
&
827 (NETIF_F_IP_CSUM
| NETIF_F_NO_CSUM
|
829 skb
->ip_summed
= CHECKSUM_HW
;
831 skb_entail(sk
, tp
, skb
);
835 /* Try to append data to the end of skb. */
839 /* Where to copy to? */
840 if (skb_tailroom(skb
) > 0) {
841 /* We have some space in skb head. Superb! */
842 if (copy
> skb_tailroom(skb
))
843 copy
= skb_tailroom(skb
);
844 if ((err
= skb_add_data(skb
, from
, copy
)) != 0)
848 int i
= skb_shinfo(skb
)->nr_frags
;
849 struct page
*page
= TCP_PAGE(sk
);
850 int off
= TCP_OFF(sk
);
852 if (skb_can_coalesce(skb
, i
, page
, off
) &&
854 /* We can extend the last page
857 } else if (i
== MAX_SKB_FRAGS
||
859 !(sk
->sk_route_caps
& NETIF_F_SG
))) {
860 /* Need to add new fragment and cannot
861 * do this because interface is non-SG,
862 * or because all the page slots are
864 tcp_mark_push(tp
, skb
);
867 if (off
== PAGE_SIZE
) {
869 TCP_PAGE(sk
) = page
= NULL
;
874 /* Allocate new cache page. */
875 if (!(page
= sk_stream_alloc_page(sk
)))
876 goto wait_for_memory
;
880 if (copy
> PAGE_SIZE
- off
)
881 copy
= PAGE_SIZE
- off
;
883 /* Time to copy data. We are close to
885 err
= skb_copy_to_page(sk
, from
, skb
, page
,
888 /* If this page was new, give it to the
889 * socket so it does not get leaked.
898 /* Update the skb. */
900 skb_shinfo(skb
)->frags
[i
- 1].size
+=
903 skb_fill_page_desc(skb
, i
, page
, off
, copy
);
906 } else if (off
+ copy
< PAGE_SIZE
) {
912 TCP_OFF(sk
) = off
+ copy
;
916 TCP_SKB_CB(skb
)->flags
&= ~TCPCB_FLAG_PSH
;
918 tp
->write_seq
+= copy
;
919 TCP_SKB_CB(skb
)->end_seq
+= copy
;
920 skb_shinfo(skb
)->tso_segs
= 0;
924 if ((seglen
-= copy
) == 0 && iovlen
== 0)
927 if (skb
->len
< mss_now
|| (flags
& MSG_OOB
))
930 if (forced_push(tp
)) {
931 tcp_mark_push(tp
, skb
);
932 __tcp_push_pending_frames(sk
, tp
, mss_now
, TCP_NAGLE_PUSH
);
933 } else if (skb
== sk
->sk_send_head
)
934 tcp_push_one(sk
, mss_now
);
938 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
941 tcp_push(sk
, tp
, flags
& ~MSG_MORE
, mss_now
, TCP_NAGLE_PUSH
);
943 if ((err
= sk_stream_wait_memory(sk
, &timeo
)) != 0)
946 mss_now
= tcp_current_mss(sk
, !(flags
&MSG_OOB
));
947 size_goal
= tp
->xmit_size_goal
;
953 tcp_push(sk
, tp
, flags
, mss_now
, tp
->nonagle
);
960 if (sk
->sk_send_head
== skb
)
961 sk
->sk_send_head
= NULL
;
962 __skb_unlink(skb
, &sk
->sk_write_queue
);
963 sk_stream_free_skb(sk
, skb
);
970 err
= sk_stream_error(sk
, flags
, err
);
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */
static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
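/*
 * Editor's note: the single byte of urgent data handled above is fetched
 * from userspace with MSG_OOB; a sketch (the call fails with EINVAL when
 * SO_OOBINLINE is set or the byte was already consumed):
 *
 *	char c;
 *	if (recv(tcp_fd, &c, 1, MSG_OOB) == 1)
 *		... c is the out-of-band byte ...
 */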
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (tcp_ack_scheduled(tp)) {
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (tp->ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
		     !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (skb->h.th->syn)
			offset--;
		if (offset < skb->len || skb->h.th->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}
/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (skb->h.th->fin) {
			sk_eat_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied)
		cleanup_rbuf(sk, copied);
	return copied;
}
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		struct sk_buff *skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (skb->h.th->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * it is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk, tp);
		}
		if (used + offset < skb->len)
			continue;

		if (skb->h.th->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}
/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	in a closed state already.
 */

static unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
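/*
 * Editor's worked example of the table above: closing an ESTABLISHED socket
 * reads new_state[TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to FIN_WAIT1 and returns non-zero,
 * telling the caller to emit a FIN. Closing from SYN_SENT instead yields
 * TCP_CLOSE with no action bit, so no FIN is sent.
 */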
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
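/*
 * Editor's note: userspace half-close maps onto tcp_shutdown(); a sketch:
 *
 *	shutdown(tcp_fd, SHUT_WR);	send FIN, keep reading
 *	while ((n = read(tcp_fd, buf, sizeof(buf))) > 0)
 *		... drain the peer's remaining data ...
 */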
/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void tcp_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->sk_state == TCP_CLOSE);
	BUG_TRAP(sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	BUG_TRAP(sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->num, it must be bound */
	BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	atomic_dec(&tcp_orphan_count);
	sock_put(sk);
}
void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		tcp_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  skb->h.th->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_stream_mem_reclaim(sk);

	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
	 * 3.10, we send a RST here because data was lost.  To
	 * witness the awful effects of the old behavior of always
	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
	 * a bulk GET in an FTP client, suspend the process, wait
	 * for the client to advertise a zero window, then kill -9
	 * the FTP client, wheee...  Note: timeout is always zero
	 * in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);

	/* Now socket is owned by kernel and we acquire BH lock
	   to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 *
	 *	Nope, it was not mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			int tmo = tcp_fin_time(tp);

			if (tmo > TCP_TIMEWAIT_LEN) {
				tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
			} else {
				atomic_inc(&tcp_orphan_count);
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	atomic_inc(&tcp_orphan_count);

	if (sk->sk_state == TCP_CLOSE)
		tcp_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
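/*
 * Editor's note: the zero-linger abort branch above is reached from
 * userspace as follows (sketch; see the Salvatore Sanfilippo entry in the
 * changelog for the RFC 793 ABORT call semantics):
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(tcp_fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(tcp_fd);		RST instead of FIN, nothing lingers
 */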
/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}
int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		tcp_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	sk_stream_writequeue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	tp->backoff = 0;
	tp->snd_cwnd = 2;
	tp->probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tcp_set_ca_state(tp, TCP_CA_Open);
	tcp_clear_retrans(tp);
	tcp_delack_init(tp);
	sk->sk_send_head = NULL;
	tp->rx_opt.saw_tstamp = 0;
	tcp_sack_reset(&tp->rx_opt);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || inet->bind_hash);

	sk->sk_error_report(sk);
	return err;
}
/*
 *	Wait for an incoming connection, avoid race
 *	conditions. This must be called with the socket locked.
 */
static int wait_for_connect(struct sock *sk, long timeo)
{
	struct tcp_sock *tp = tcp_sk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&tp->accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&tp->accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}
/*
 *	This will accept the next outstanding connection.
 */
struct sock *tcp_accept(struct sock *sk, int flags, int *err)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&tp->accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}
/*
 *	Socket option code for TCP.
 */
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int val;
	int err = 0;

	if (level != SOL_TCP)
		return tp->af_specific->setsockopt(sk, level, optname,
						   optval, optlen);

	/* This is a string value all the others are int's */
	if (optname == TCP_CONGESTION) {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min(TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(tp, name);
		release_sock(sk);
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				tcp_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			tp->syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		tp->defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of
			 * retransmits */
			while (tp->defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				       tp->defer_accept))
				tp->defer_accept++;
			tp->defer_accept++;
		}
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			tp->ack.pingpong = 1;
		} else {
			tp->ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    tcp_ack_scheduled(tp)) {
				tp->ack.pending |= TCP_ACK_PUSHED;
				cleanup_rbuf(sk, 1);
				if (!(val & 1))
					tp->ack.pingpong = 1;
			}
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	};
	release_sock(sk);
	return err;
}
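/*
 * Editor's note: the classic cork/uncork pattern the TCP_CORK comment above
 * describes, as a hedged userspace sketch:
 *
 *	int on = 1, off = 0;
 *	setsockopt(tcp_fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(tcp_fd, headers, hlen);
 *	sendfile(tcp_fd, file_fd, NULL, flen);
 *	setsockopt(tcp_fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 */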
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = tp->ca_state;
	info->tcpi_retransmits = tp->retransmits;
	info->tcpi_probes = tp->probes_out;
	info->tcpi_backoff = tp->backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->rx_opt.sack_ok)
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(tp->rto);
	info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = tp->ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = tp->pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
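/*
 * Editor's note: tcp_get_info() backs the TCP_INFO getsockopt; a hedged
 * userspace sketch (struct tcp_info comes from <linux/tcp.h>):
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *	if (!getsockopt(tcp_fd, IPPROTO_TCP, TCP_INFO, &info, &len))
 *		printf("rtt %u us, cwnd %u\n", info.tcpi_rtt,
 *		       info.tcpi_snd_cwnd);
 */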
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (level != SOL_TCP)
		return tp->af_specific->getsockopt(sk, level, optname,
						   optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = tp->syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
					       (tp->defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !tp->ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, tp->ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	int order, i;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
					      sizeof(struct inet_bind_bucket),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!tcp_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
						sizeof(struct tcp_tw_bucket),
						0, SLAB_HWCACHE_ALIGN,
						NULL, NULL);
	if (!tcp_timewait_cachep)
		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
						(25 - PAGE_SHIFT) :
						(27 - PAGE_SHIFT),
					HASH_HIGHMEM,
					&tcp_ehash_size,
					NULL,
					0);
	tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
	for (i = 0; i < (tcp_ehash_size << 1); i++) {
		rwlock_init(&tcp_ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
	}

	tcp_bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_ehash_size,
					(num_physpages >= 128 * 1024) ?
						(25 - PAGE_SHIFT) :
						(27 - PAGE_SHIFT),
					HASH_HIGHMEM,
					&tcp_bhash_size,
					NULL,
					64 * 1024);
	tcp_bhash_size = 1 << tcp_bhash_size;
	for (i = 0; i < tcp_bhash_size; i++) {
		spin_lock_init(&tcp_bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		sysctl_tcp_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		sysctl_tcp_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
	tcp_port_rover = sysctl_local_port_range[0] - 1;

	sysctl_tcp_mem[0] =  768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;

	if (order < 3) {
		sysctl_tcp_wmem[2] = 64 * 1024;
		sysctl_tcp_rmem[0] = PAGE_SIZE;
		sysctl_tcp_rmem[1] = 43689;
		sysctl_tcp_rmem[2] = 2 * 43689;
	}

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_ehash_size << 1, tcp_bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}
EXPORT_SYMBOL(tcp_accept);
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_destroy_sock);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);