tcp: md5: rcu conversion
net/ipv4/tcp_ipv4.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host, but per port pair, and the TW bucket is used
	   as state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

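/* Worked example: with net.ipv4.tcp_tw_reuse=1, an outgoing connect()
 * whose chosen four-tuple collides with a local TIME-WAIT bucket more
 * than one second old is allowed to reuse it. The new sequence space is
 * started at
 *
 *	tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 *
 * i.e. a full 64KB window beyond the old connection's last sequence
 * number, so no stray full-window segment from the previous incarnation
 * can fall inside the new connection's window.
 */
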
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
		struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
		 * when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

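/* User-space sketch (illustrative only; 192.0.2.1 is just the RFC 5737
 * documentation prefix): a plain connect() on a TCP socket reaches
 * tcp_v4_connect() through sk->sk_prot->connect():
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = { .sin_family = AF_INET };
 *
 *	sin.sin_port = htons(80);
 *	sin.sin_addr.s_addr = inet_addr("192.0.2.1");
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * An addr_len shorter than sizeof(struct sockaddr_in) fails with -EINVAL,
 * and a non-AF_INET family with -EAFNOSUPPORT, per the checks above.
 */
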
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to go wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

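/* Worked example: if icsk_pmtu_cookie is 1500 and an ICMP FRAG_NEEDED
 * arrives advertising mtu = 1400, update_pmtu() lowers the route MTU,
 * tcp_sync_mss(sk, 1400) shrinks mss_cache to 1360 (1400 minus 20 bytes
 * of IP header and 20 of TCP header, ignoring options), and
 * tcp_simple_retransmit() resends the now-oversized segments at once
 * rather than waiting for the retransmit timer.
 */
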
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, for example, if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

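/* Note on the CHECKSUM_PARTIAL branch above: tcp_v4_check(len, saddr,
 * daddr, 0) folds the pseudo-header {saddr, daddr, IPPROTO_TCP, len}
 * into a 16-bit one's-complement sum, and the ~ stores its complement
 * in th->check. The device (or the software fallback) then only has to
 * add the one's-complement sum of the bytes from csum_start onward and
 * write the folded result at csum_start + csum_offset.
 */
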
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_md5_do_lookup(sk,
				     (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
				     AF_INET) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

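/* Worked example for the no-ACK branch above: an incoming SYN with
 * seq = 1000, a 40-byte header (doff = 10) and no payload (skb->len = 40)
 * is answered with a RST carrying seq = 0, ack = 1, and
 * ack_seq = 1000 + 1 (syn) + 0 (fin) + 40 - 40 = 1001, exactly the
 * sequence number the peer expects to see acknowledged.
 */
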
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return 1 if a syncookie should be sent
 */
int tcp_syn_flood_action(struct sock *sk,
			 const struct sk_buff *skb,
			 const char *proto)
{
	const char *msg = "Dropping request";
	int want_cookie = 0;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. "
			" Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
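
/* Example: with net.ipv4.tcp_syncookies=1, a listener whose SYN queue
 * has overflowed keeps answering with cookie-encoded ISNs instead of
 * dropping SYNs, and logs once per listen socket:
 *
 *	TCP: Possible SYN flooding on port 80. Sending cookies.  Check SNMP counters.
 */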

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);

	if (!tp->md5sig_info)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &tp->md5sig_info->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
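
/* The RCU conversion named in the commit title is visible here: readers
 * in the packet-receive path walk the per-socket key list locklessly via
 * hlist_for_each_entry_rcu(), while writers (tcp_md5_do_add/_del below,
 * typically serialized by the socket lock) publish with
 * hlist_add_head_rcu() and retire with hlist_del_rcu() + kfree_rcu().
 * Reader-side sketch:
 *
 *	rcu_read_lock();
 *	key = tcp_md5_do_lookup(sk, addr, AF_INET);
 *	if (key)
 *		... use key->key / key->keylen, stable until unlock ...
 *	rcu_read_unlock();
 */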

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = tp->md5sig_info;
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		tp->md5sig_info = md5sig;
	}

	key = kmalloc(sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		kfree(key);
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
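
/* User-space sketch: keys reach tcp_md5_do_add() through the TCP_MD5SIG
 * socket option (192.0.2.1 is a placeholder peer). Both endpoints must
 * configure the same key:
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	a->sin_family = AF_INET;
 *	a->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * See tcp_v4_parse_md5_keys() below for the kernel-side validation of
 * this structure; a zero tcpm_keylen deletes the key instead.
 */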

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	kfree_rcu(key, rcu);
	if (hlist_empty(&tp->md5sig_info->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;

	if (!hlist_empty(&tp->md5sig_info->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &tp->md5sig_info->head, node) {
		hlist_del_rcu(&key->node);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

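/* RFC 2385 hash input order, as implemented above and in the two callers
 * below: MD5 runs over (1) this pseudo-header, (2) the TCP header with
 * its checksum field zeroed (tcp_md5_hash_header()), (3) the segment
 * payload where present (tcp_md5_hash_skb_data()), and (4) the key
 * itself (tcp_md5_hash_key()). For a 20-byte header-only segment from
 * 192.0.2.1 to 192.0.2.2 the pseudo-header is the 12 bytes
 * { 192.0.2.1, 192.0.2.2, 0x00, 0x06 (IPPROTO_TCP), 0x0014 }.
 */
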
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

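/* Summary of the four cases above (return 0 = accept, 1 = drop):
 *
 *	key configured	MD5 option	result
 *	no		no		accept
 *	yes		no		drop (TCPMD5NOTFOUND)
 *	no		yes		drop (TCPMD5UNEXPECTED)
 *	yes		yes		recompute; drop on mismatch
 */
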
#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	int want_cookie = 0;

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;
		struct flowi4 fl4;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr &&
		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
		goto put_and_exit;

	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
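
/* Note on the CHECKSUM_COMPLETE branch above: the driver has already
 * summed the whole packet, so validation is a single fold -
 * tcp_v4_check(len, saddr, daddr, skb->csum) adds the pseudo-header to
 * the device's sum and must return zero for a valid segment. Short
 * packets (<= 76 bytes) are verified immediately; longer ones are
 * checked lazily later, with skb->csum seeded here with the
 * pseudo-header sum.
 */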
1525
1526
1527/* The socket must have it's spinlock held when we get
1528 * here.
1529 *
1530 * We have a potential double-lock case here, so even when
1531 * doing backlog processing we use the BH locking scheme.
1532 * This is because we cannot sleep with the original spinlock
1533 * held.
1534 */
1535int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1536{
cfb6eeb4
YH
1537 struct sock *rsk;
1538#ifdef CONFIG_TCP_MD5SIG
1539 /*
1540 * We really want to reject the packet as early as possible
1541 * if:
1542 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1543 * o There is an MD5 option and we're not expecting one
1544 */
7174259e 1545 if (tcp_v4_inbound_md5_hash(sk, skb))
cfb6eeb4
YH
1546 goto discard;
1547#endif
1548
1da177e4 1549 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
bdeab991 1550 sock_rps_save_rxhash(sk, skb);
aa8223c7 1551 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1552 rsk = sk;
1da177e4 1553 goto reset;
cfb6eeb4 1554 }
1da177e4
LT
1555 return 0;
1556 }
1557
ab6a5bb6 1558 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1559 goto csum_err;
1560
1561 if (sk->sk_state == TCP_LISTEN) {
1562 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1563 if (!nsk)
1564 goto discard;
1565
1566 if (nsk != sk) {
bdeab991 1567 sock_rps_save_rxhash(nsk, skb);
cfb6eeb4
YH
1568 if (tcp_child_process(sk, nsk, skb)) {
1569 rsk = nsk;
1da177e4 1570 goto reset;
cfb6eeb4 1571 }
1da177e4
LT
1572 return 0;
1573 }
ca55158c 1574 } else
bdeab991 1575 sock_rps_save_rxhash(sk, skb);
ca55158c 1576
aa8223c7 1577 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1578 rsk = sk;
1da177e4 1579 goto reset;
cfb6eeb4 1580 }
1da177e4
LT
1581 return 0;
1582
1583reset:
cfb6eeb4 1584 tcp_v4_send_reset(rsk, skb);
1da177e4
LT
1585discard:
1586 kfree_skb(skb);
1587 /* Be careful here. If this function gets more complicated and
1588 * gcc suffers from register pressure on the x86, sk (in %ebx)
1589 * might be destroyed here. This current version compiles correctly,
1590 * but you have been warned.
1591 */
1592 return 0;
1593
1594csum_err:
63231bdd 1595 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1596 goto discard;
1597}
4bc2f18b 1598EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4
LT
1599
1600/*
1601 * From tcp_input.c
1602 */
1603
1604int tcp_v4_rcv(struct sk_buff *skb)
1605{
eddc9ec5 1606 const struct iphdr *iph;
cf533ea5 1607 const struct tcphdr *th;
1da177e4
LT
1608 struct sock *sk;
1609 int ret;
a86b1e30 1610 struct net *net = dev_net(skb->dev);
1da177e4
LT
1611
1612 if (skb->pkt_type != PACKET_HOST)
1613 goto discard_it;
1614
1615 /* Count it even if it's bad */
63231bdd 1616 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1617
1618 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1619 goto discard_it;
1620
aa8223c7 1621 th = tcp_hdr(skb);
1da177e4
LT
1622
1623 if (th->doff < sizeof(struct tcphdr) / 4)
1624 goto bad_packet;
1625 if (!pskb_may_pull(skb, th->doff * 4))
1626 goto discard_it;
1627
1628 /* An explanation is required here, I think.
1629 * Packet length and doff are validated by header prediction,
caa20d9a 1630 * provided case of th->doff==0 is eliminated.
1da177e4 1631 * So, we defer the checks. */
60476372 1632 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1da177e4
LT
1633 goto bad_packet;
1634
aa8223c7 1635 th = tcp_hdr(skb);
eddc9ec5 1636 iph = ip_hdr(skb);
1da177e4
LT
1637 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1638 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1639 skb->len - th->doff * 4);
1640 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1641 TCP_SKB_CB(skb)->when = 0;
b82d1bb4 1642 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
1643 TCP_SKB_CB(skb)->sacked = 0;
1644
9a1f27c4 1645 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1646 if (!sk)
1647 goto no_tcp_socket;
1648
bb134d5d
ED
1649process:
1650 if (sk->sk_state == TCP_TIME_WAIT)
1651 goto do_time_wait;
1652
6cce09f8
ED
1653 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1654 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1655 goto discard_and_relse;
6cce09f8 1656 }

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
		peer = inet_getpeer_v4(inet->inet_daddr, 1);
		*release_it = true;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, inet->inet_daddr, 1);
		peer = rt->peer;
		*release_it = false;
	}

	return peer;
}
EXPORT_SYMBOL(tcp_v4_get_peer);

void *tcp_v4_tw_get_peer(struct sock *sk)
{
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	return inet_getpeer_v4(tw->tw_daddr, 1);
}
EXPORT_SYMBOL(tcp_v4_tw_get_peer);

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v4_tw_get_peer,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
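
/*
 * The TCP core never calls these IPv4 routines directly; it dispatches
 * through this table so the same state machine also serves IPv6. A
 * sketch of the calling convention (illustrative, not a quote of the
 * core code; the exact queue_xmit signature varies by kernel version):
 *
 *	struct inet_connection_sock *icsk = inet_csk(sk);
 *
 *	icsk->icsk_af_ops->send_check(sk, skb);
 *	icsk->icsk_af_ops->queue_xmit(skb, &inet_sk(sk)->cork.fl);
 */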

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif
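
/*
 * These hooks back the RFC 2385 TCP-MD5 option used by BGP speakers.
 * Userspace installs a per-peer key with the TCP_MD5SIG socket option;
 * a minimal sketch, where peer_sin is an already-filled struct
 * sockaddr_in and key a NUL-terminated secret (both illustrative,
 * error handling omitted):
 *
 *	struct tcp_md5sig md5sig;
 *
 *	memset(&md5sig, 0, sizeof(md5sig));
 *	memcpy(&md5sig.tcpm_addr, &peer_sin, sizeof(peer_sin));
 *	md5sig.tcpm_keylen = strlen(key);
 *	memcpy(md5sig.tcpm_key, key, md5sig.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5sig, sizeof(md5sig));
 */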

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them. -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;
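
	/*
	 * For scale (informational): TCP_INIT_CWND is 10 segments, so with
	 * a typical 1460-byte MSS the initial window is about 14.6 KB
	 * (10 * 1460 = 14600 bytes), in line with the larger initial
	 * windows of RFC 6928, versus roughly 4380 bytes for the older
	 * RFC 3390 three-segment start.
	 */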

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	sock_update_memcg(sk);
	sk_sockets_allocated_inc(sk);
	local_bh_enable();

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean the prequeue; it really must be empty by now. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get the next listener socket following cur. If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero, the very first socket in the hash table is
 * returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid = sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state = TCP_SEQ_STATE_OPENREQ;
			st->sbucket = 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non-standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
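
/*
 * Each /proc/net/tcp line is padded to a fixed width (TMPSZ - 1
 * characters plus the newline), with addresses and ports printed as
 * native-endian hex. A minimal userspace reader, as an illustrative
 * sketch (fields after the state byte are ignored for brevity):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f)) {
 *			unsigned sl, laddr, lport, raddr, rport, state;
 *
 *			if (sscanf(line, " %u: %x:%x %x:%x %x", &sl, &laddr,
 *				   &lport, &raddr, &rport, &state) == 6)
 *				printf("slot %u state %#x\n", sl, state);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */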

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
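
/*
 * A worked detail (informational): after tcp4_gro_complete(), th->check
 * holds the bitwise NOT of the folded pseudo-header sum, i.e.
 *
 *	th->check = ~csum_fold(csum_tcpudp_nofold(iph->saddr, iph->daddr,
 *						  len, IPPROTO_TCP, 0));
 *
 * which is the seed that checksum offload (or the software GSO path)
 * expects, so each resegmented packet only needs its payload sum added
 * on top rather than a full recomputation.
 */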

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}