/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/config.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern atomic_t tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)

/*
 * Never offer a window over 32767 without using window scaling.  Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_SYNQ_HSIZE		512	/* Size of SYNACK hash table */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * the minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32-bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}

static inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
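
/* Usage sketch (illustrative, not part of the original source): these
 * helpers stay correct across 32-bit wraparound because the subtraction
 * is done in modular arithmetic before the signed comparison, e.g.
 *
 *	before(0xfffffff0, 0x00000010) == 1
 *
 * since (__s32)(0xfffffff0 - 0x00000010) == -32 < 0, even though the
 * first value is numerically larger.  A typical caller-side test for
 * whether a segment carries new data looks like:
 *
 *	if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
 *		... segment advances rcv_nxt ...
 */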

extern struct proto tcp_prot;

DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field)		SNMP_DEC_STATS(tcp_statistics, field)
#define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);

extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);

extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				       struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);

extern void tcp_unhash(struct sock *sk);

extern int tcp_v4_hash_connecting(struct sock *sk);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
				      unsigned int cur_mss, int nonagle);
extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk, int large);

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
 * It is better to underestimate RCV_MSS than to overestimate it:
 * overestimating makes us ACK less frequently than needed, while
 * underestimates are easier to detect and fix by tcp_measure_rcv_mss().
 */

static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd/2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
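
/* Worked example (illustrative figures, not from the original source):
 * with advmss == 1460, mss_cache == 1460 and rcv_wnd == 64000, the
 * chain evaluates min(1460, 1460) -> min(1460, 32000) -> min(1460, 536)
 * -> max(536, 88) == 536: a deliberately conservative first guess that
 * tcp_measure_rcv_mss() can later revise upward.
 */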

static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}
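
/* Encoding sketch (illustrative): pred_flags caches the expected 4th
 * 32-bit word of an incoming TCP header -- data offset, ACK flag and
 * advertised window -- so the receive fast path can validate a segment
 * with a single word compare.  E.g. tcp_header_len == 32 (doff == 8)
 * and a descaled snd_wnd of 0x1234 yield
 *
 *	pred_flags == htonl((32 << 26) | ntohl(TCP_FLAG_ACK) | 0x1234)
 *
 * which is why any option or window change must go back through
 * tcp_fast_path_on().
 */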

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
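
/* Worked example (illustrative numbers): rcv_wup == 1000,
 * rcv_wnd == 4000 and rcv_nxt == 4500 give a right edge of 5000, so
 * tcp_receive_window() returns 500.  If the peer had pushed past the
 * offered window (rcv_nxt > 5000), the signed intermediate would be
 * negative and the result is clamped to 0.
 */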

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32 bits, which causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines; if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid when the URG flag is set */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

#include <net/tcp_ecn.h>

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_size;
}
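
/* Illustrative relationship (assumed figures): a TSO super-packet of
 * 4380 payload bytes with tso_size == 1460 reports
 * tcp_skb_pcount() == 3 and tcp_skb_mss() == 1460, i.e. the single SKB
 * is accounted as ceil(len / mss) real segments in packets_out and the
 * other counters.
 */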

static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	if (*count) {
		*count -= tcp_skb_pcount(skb);
		if ((int)*count < 0)
			*count = 0;
	}
}

static inline void tcp_packets_out_inc(struct sock *sk,
				       struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	int orig = tp->packets_out;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!orig)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_packets_out_dec(struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	tp->packets_out -= tcp_skb_pcount(skb);
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
struct tcp_congestion_ops {
	struct list_head	list;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack,
			   u32 rtt, u32 in_flight, int good_ack);
	/* round trip time sample per acked packet (optional) */
	void (*rtt_sample)(struct sock *sk, u32 usrtt);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
				u32 rtt, u32 in_flight, int flag);
extern u32 tcp_reno_min_cwnd(struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
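
/* Registration sketch (hypothetical module, not part of this header):
 * only ssthresh and cong_avoid are strictly required; optional hooks
 * may stay NULL, and the Reno helpers declared above exist precisely
 * so a new algorithm can borrow standard behaviour.
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 */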

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use plain
 * tp->packets_out to determine whether the send queue is empty.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return (tp->packets_out - tp->left_out + tp->retrans_out);
}
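
/* Worked example (illustrative numbers): packets_out == 10,
 * sacked_out == 2, lost_out == 1 (so left_out == 3) and
 * retrans_out == 1 give in_flight == 10 - 3 + 1 == 8: two segments
 * are SACKed and one is presumed lost, but one retransmission has
 * re-entered the pipe.
 */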

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the rate-halving phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
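
/* The shift pair above is 3/4 of cwnd: e.g. snd_cwnd == 40 gives
 * (40 >> 1) + (40 >> 2) == 30, so outside CWR/Recovery the value
 * reported never drops below three quarters of the current window
 * (illustrative figure).
 */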

/*
 * Congestion window growth during slow start: cwnd grows by up to one
 * segment per ACK, i.e. exponentially per round trip.
 */
static inline void tcp_slow_start(struct tcp_sock *tp)
{
	if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Counting during slow start.
		 * The sender SHOULD increase cwnd by the number of
		 * previously unacknowledged bytes ACKed by each incoming
		 * acknowledgment, provided the increase is not more than L.
		 */
		if (tp->bytes_acked < tp->mss_cache)
			return;

		/* We MAY increase by 2 if a delayed ACK is detected. */
		if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	}
	tp->bytes_acked = 0;

	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
		tp->snd_cwnd++;
}
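
/* Trace of the non-ABC path (illustrative): each invocation adds at
 * most one segment, so ten ACKs take snd_cwnd from 10 to 20 (clamp
 * permitting) -- a doubling per round trip.  With sysctl_tcp_abc > 1,
 * a stretch ACK covering more than 2*mss_cache of new data may add a
 * second segment to compensate for delayed ACKs.
 */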

static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
	if (tp->rx_opt.sack_ok &&
	    (tp->sacked_out >= tp->packets_out - tp->lost_out))
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;
}

/* Set the slow start threshold and reduce cwnd, without falling back
 * to slow start.
 */
static inline void __tcp_enter_cwr(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->undo_marker = 0;
	tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + 1U);
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	TCP_ECN_queue_cwr(tp);
}

static inline void tcp_enter_cwr(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
		__tcp_enter_cwr(sk);
		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return 3;
}

/* RFC2861: check whether we are limited by the application or by the
 * congestion window.  This is the inverse of the cwnd check in
 * tcp_tso_should_defer().
 */
static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	if (!(sk->sk_route_caps & NETIF_F_TSO))
		return 0;

	left = tp->snd_cwnd - in_flight;
	if (sysctl_tcp_tso_win_divisor)
		return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
	else
		return left <= tcp_max_burst(tp);
}
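
/* Worked example (illustrative numbers): snd_cwnd == 20 and
 * in_flight == 12 leave 8 spare segments.  With
 * sysctl_tcp_tso_win_divisor == 3 the test 8 * 3 < 20 is false, and
 * with the divisor unset 8 <= tcp_max_burst() is false too, so the
 * connection is treated as application-limited rather than
 * cwnd-limited.
 */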

static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
					   const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static __inline__ void tcp_push_pending_frames(struct sock *sk,
					       struct tcp_sock *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}

static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate (or check) the TCP checksum.
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

/* The packet is added to the VJ-style prequeue for processing in process
 * context, if a reader task is waiting.  Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere.  Latency?  Burstiness?  Well, at least now we will
 * see why it failed.  8)8)  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!inet_csk_ack_scheduled(sk))
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  (3 * TCP_RTO_MIN) / 4,
							  TCP_RTO_MAX);
		}
		return 1;
	}
	return 0;
}

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif

static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&tcp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk,
		   statename[oldstate], statename[state]);
#endif
}

static __inline__ void tcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->eff_sacks = 0;
	rx_opt->num_sacks = 0;
}

static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}
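
/* Wire-format note (illustration; the constant follows from the
 * definitions above): the timestamp prefix built here is the familiar
 *
 *	NOP | NOP | TIMESTAMP | 10  ==  htonl(0x0101080a)
 *
 * followed by TSVAL and TSECR, TCPOLEN_TSTAMP_ALIGNED (12) bytes in
 * total.
 */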

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					 int offer_wscale, int wscale,
					 __u32 tstamp, __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) |
						  (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) |
						  TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
						  (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) |
						  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) |
					  TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       (wscale));
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
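
/* Worked example (illustrative; assumes the usual
 * sysctl_tcp_adv_win_scale == 2): space == 65536 advertises
 * 65536 - (65536 >> 2) == 49152 bytes, reserving a quarter for
 * overhead, while a scale of -2 would advertise only
 * 65536 >> 2 == 16384.
 */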

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static __inline__ void tcp_openreq_init(struct request_sock *req,
					struct tcp_options_received *rx_opt,
					struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = skb->h.th->source;
}

extern void tcp_enter_memory_pressure(void);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
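
/* The clamp above keeps FIN-WAIT-2 alive for at least 3.5 RTOs, since
 * (rto << 2) - (rto >> 1) == 4*rto - rto/2.  With rto == 200ms
 * (illustrative figure) the floor is 700ms, so a tiny
 * sysctl_tcp_fin_timeout cannot cut the wait below what a
 * retransmitted FIN may need.
 */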

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake, and one has to understand the
	   reasons for the constraint before relaxing it: if the peer
	   reboots, its clock may go out-of-sync and half-open connections
	   would never be reset.  The problem would not exist if all
	   implementations followed the draft about maintaining clocks
	   across reboots.  Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
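
/* Worked example (illustrative): ts_recent == 1000 and an arriving
 * rcv_tsval == 999 fail the first test ((s32)-1 < 0), so unless
 * ts_recent is more than TCP_PAWS_24DAYS stale the segment is flagged
 * as a PAWS reject (return 1) -- except for an RST, which is accepted
 * once ts_recent is at least TCP_PAWS_MSL (60 s) old.
 */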

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && sk->sk_send_head &&
		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}

static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void clear_all_retrans_hints(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
	tp->retransmit_skb_hint = NULL;
	tp->forward_skb_hint = NULL;
	tp->fastpath_skb_hint = NULL;
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	struct module		*owner;
	char			*name;
	sa_family_t		family;
	int			(*seq_show) (struct seq_file *m, void *v);
	struct file_operations	*seq_fops;
};

struct tcp_iter_state {
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
	struct seq_operations	seq_ops;
};

extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;

extern int tcp_v4_destroy_sock(struct sock *sk);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

extern void tcp_v4_init(struct net_proto_family *ops);
extern void tcp_init(void);

#endif	/* _TCP_H */