tcp: do not lock listener to process SYN packets
[deliverable/linux.git] / net/ipv4/tcp_ipv4.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99
100 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 ip_hdr(skb)->saddr,
104 tcp_hdr(skb)->dest,
105 tcp_hdr(skb)->source);
106 }
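/* Note: secure_tcp_sequence_number() derives the ISN as a keyed hash of
 * the connection 4-tuple combined with a fine-grained clock, in the
 * spirit of RFC 6528, so ISNs are hard to predict off-path while still
 * advancing monotonically for a given 4-tuple.
 */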
107
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 struct tcp_sock *tp = tcp_sk(sk);
112
113 /* With PAWS, it is safe from the viewpoint
114 of data integrity. Even without PAWS it is safe provided sequence
115 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
116
117    Actually, the idea is close to VJ's one, only the timestamp cache is
118    held not per host but per port pair, and the TW bucket is used as the
119    state holder.
120
121    If the TW bucket has already been destroyed we fall back to VJ's scheme
122    and use the initial timestamp retrieved from the peer table.
123 */
124 if (tcptw->tw_ts_recent_stamp &&
125 (!twp || (sysctl_tcp_tw_reuse &&
126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 if (tp->write_seq == 0)
129 tp->write_seq = 1;
130 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
131 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 sock_hold(sktw);
133 return 1;
134 }
135
136 return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
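/* A rough check of the 80Mbit/sec figure above, assuming the classic
 * MSL of 120 seconds: reuse is safe if the new connection cannot wrap
 * half the 32-bit sequence space within 2*MSL, i.e.
 *
 *	2^31 bytes / 240 sec ~= 8.9 MB/sec ~= 71 Mbit/sec
 *
 * which is the order of the quoted bound.
 */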
139
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 struct inet_sock *inet = inet_sk(sk);
145 struct tcp_sock *tp = tcp_sk(sk);
146 __be16 orig_sport, orig_dport;
147 __be32 daddr, nexthop;
148 struct flowi4 *fl4;
149 struct rtable *rt;
150 int err;
151 struct ip_options_rcu *inet_opt;
152
153 if (addr_len < sizeof(struct sockaddr_in))
154 return -EINVAL;
155
156 if (usin->sin_family != AF_INET)
157 return -EAFNOSUPPORT;
158
159 nexthop = daddr = usin->sin_addr.s_addr;
160 inet_opt = rcu_dereference_protected(inet->inet_opt,
161 sock_owned_by_user(sk));
162 if (inet_opt && inet_opt->opt.srr) {
163 if (!daddr)
164 return -EINVAL;
165 nexthop = inet_opt->opt.faddr;
166 }
167
168 orig_sport = inet->inet_sport;
169 orig_dport = usin->sin_port;
170 fl4 = &inet->cork.fl.u.ip4;
171 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 IPPROTO_TCP,
174 orig_sport, orig_dport, sk);
175 if (IS_ERR(rt)) {
176 err = PTR_ERR(rt);
177 if (err == -ENETUNREACH)
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 return err;
180 }
181
182 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 ip_rt_put(rt);
184 return -ENETUNREACH;
185 }
186
187 if (!inet_opt || !inet_opt->opt.srr)
188 daddr = fl4->daddr;
189
190 if (!inet->inet_saddr)
191 inet->inet_saddr = fl4->saddr;
192 sk_rcv_saddr_set(sk, inet->inet_saddr);
193
194 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 /* Reset inherited state */
196 tp->rx_opt.ts_recent = 0;
197 tp->rx_opt.ts_recent_stamp = 0;
198 if (likely(!tp->repair))
199 tp->write_seq = 0;
200 }
201
202 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 tcp_fetch_timewait_stamp(sk, &rt->dst);
205
206 inet->inet_dport = usin->sin_port;
207 sk_daddr_set(sk, daddr);
208
209 inet_csk(sk)->icsk_ext_hdr_len = 0;
210 if (inet_opt)
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212
213 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214
215 /* Socket identity is still unknown (sport may be zero).
216 	 * However we set the state to SYN-SENT and, without releasing the socket
217 	 * lock, select a source port, enter ourselves into the hash tables and
218 * complete initialization after this.
219 */
220 tcp_set_state(sk, TCP_SYN_SENT);
221 err = inet_hash_connect(&tcp_death_row, sk);
222 if (err)
223 goto failure;
224
225 sk_set_txhash(sk);
226
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk);
229 if (IS_ERR(rt)) {
230 err = PTR_ERR(rt);
231 rt = NULL;
232 goto failure;
233 }
234 /* OK, now commit destination to socket. */
235 sk->sk_gso_type = SKB_GSO_TCPV4;
236 sk_setup_caps(sk, &rt->dst);
237
238 if (!tp->write_seq && likely(!tp->repair))
239 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 inet->inet_daddr,
241 inet->inet_sport,
242 usin->sin_port);
243
244 inet->inet_id = tp->write_seq ^ jiffies;
245
246 err = tcp_connect(sk);
247
248 rt = NULL;
249 if (err)
250 goto failure;
251
252 return 0;
253
254 failure:
255 /*
256 * This unhashes the socket and releases the local port,
257 * if necessary.
258 */
259 tcp_set_state(sk, TCP_CLOSE);
260 ip_rt_put(rt);
261 sk->sk_route_caps = 0;
262 inet->inet_dport = 0;
263 return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
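/* For context, a minimal userspace sketch of what drives this function;
 * connect(2) reaches tcp_v4_connect() via inet_stream_connect().  The
 * address and port below are documentation placeholders (RFC 5737).
 */
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int demo_connect(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}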
266
267 /*
268 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269 * It can be called through tcp_release_cb() if socket was owned by user
270 * at the time tcp_v4_err() was called to handle ICMP message.
271 */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 struct dst_entry *dst;
275 struct inet_sock *inet = inet_sk(sk);
276 u32 mtu = tcp_sk(sk)->mtu_info;
277
278 dst = inet_csk_update_pmtu(sk, mtu);
279 if (!dst)
280 return;
281
282 /* Something is about to be wrong... Remember soft error
283 	 * in case this connection will not be able to recover.
284 */
285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 sk->sk_err_soft = EMSGSIZE;
287
288 mtu = dst_mtu(dst);
289
290 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 ip_sk_accept_pmtu(sk) &&
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 tcp_sync_mss(sk, mtu);
294
295 /* Resend the TCP packet because it's
296 * clear that the old packet has been
297 * dropped. This is the new "fast" path mtu
298 * discovery.
299 */
300 tcp_simple_retransmit(sk);
301 } /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
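/* A minimal userspace sketch of the per-socket PMTU knobs that feed the
 * inet->pmtudisc setting consulted above; IP_MTU is only meaningful once
 * the socket is connected.
 */
#include <netinet/in.h>
#include <sys/socket.h>

static int enable_pmtud(int fd)
{
	int val = IP_PMTUDISC_DO;	/* always set DF; let ICMP drive PMTU */

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
}

static int current_path_mtu(int fd)
{
	int mtu;
	socklen_t len = sizeof(mtu);

	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	return mtu;
}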
304
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307 struct dst_entry *dst = __sk_dst_check(sk, 0);
308
309 if (dst)
310 dst->ops->redirect(dst, sk, skb);
311 }
312
313
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317 struct request_sock *req = inet_reqsk(sk);
318 struct net *net = sock_net(sk);
319
320 /* ICMPs are not backlogged, hence we cannot get
321 * an established socket here.
322 */
323 WARN_ON(req->sk);
324
325 if (seq != tcp_rsk(req)->snt_isn) {
326 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327 reqsk_put(req);
328 } else {
329 /*
330 * Still in SYN_RECV, just remove it silently.
331 * There is no good way to pass the error to the newly
332 * created socket, and POSIX does not want network
333 * errors returned from accept().
334 */
335 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337 }
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340
341 /*
342 * This routine is called by the ICMP module when it gets some
343 * sort of error condition. If err < 0 then the socket should
344 * be closed and the error returned to the user. If err > 0
345 * it's just the icmp type << 8 | icmp code. After adjustment
346 * header points to the first 8 bytes of the tcp header. We need
347 * to find the appropriate port.
348 *
349 * The locking strategy used here is very "optimistic". When
350 * someone else accesses the socket the ICMP is just dropped
351 * and for some paths there is no check at all.
352 * A more general error queue to queue errors for later handling
353 * is probably better.
354 *
355 */
356
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 struct inet_connection_sock *icsk;
362 struct tcp_sock *tp;
363 struct inet_sock *inet;
364 const int type = icmp_hdr(icmp_skb)->type;
365 const int code = icmp_hdr(icmp_skb)->code;
366 struct sock *sk;
367 struct sk_buff *skb;
368 struct request_sock *fastopen;
369 __u32 seq, snd_una;
370 __u32 remaining;
371 int err;
372 struct net *net = dev_net(icmp_skb->dev);
373
374 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 th->dest, iph->saddr, ntohs(th->source),
376 inet_iif(icmp_skb));
377 if (!sk) {
378 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 return;
380 }
381 if (sk->sk_state == TCP_TIME_WAIT) {
382 inet_twsk_put(inet_twsk(sk));
383 return;
384 }
385 seq = ntohl(th->seq);
386 if (sk->sk_state == TCP_NEW_SYN_RECV)
387 return tcp_req_err(sk, seq);
388
389 bh_lock_sock(sk);
390 /* If too many ICMPs get dropped on busy
391 	 * servers, this needs to be solved differently.
392 	 * We do take care of the PMTU discovery (RFC1191) special case:
393 	 * we can receive locally generated ICMP messages while the socket is held.
394 */
395 if (sock_owned_by_user(sk)) {
396 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398 }
399 if (sk->sk_state == TCP_CLOSE)
400 goto out;
401
402 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404 goto out;
405 }
406
407 icsk = inet_csk(sk);
408 tp = tcp_sk(sk);
409 	/* XXX (TFO) - tp->snd_una should be ISN (see tcp_create_openreq_child()) */
410 fastopen = tp->fastopen_rsk;
411 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 if (sk->sk_state != TCP_LISTEN &&
413 !between(seq, snd_una, tp->snd_nxt)) {
414 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415 goto out;
416 }
417
418 switch (type) {
419 case ICMP_REDIRECT:
420 do_redirect(icmp_skb, sk);
421 goto out;
422 case ICMP_SOURCE_QUENCH:
423 /* Just silently ignore these. */
424 goto out;
425 case ICMP_PARAMETERPROB:
426 err = EPROTO;
427 break;
428 case ICMP_DEST_UNREACH:
429 if (code > NR_ICMP_UNREACH)
430 goto out;
431
432 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433 /* We are not interested in TCP_LISTEN and open_requests
434 		 * (SYN-ACKs sent out by Linux are always < 576 bytes so
435 * they should go through unfragmented).
436 */
437 if (sk->sk_state == TCP_LISTEN)
438 goto out;
439
440 tp->mtu_info = info;
441 if (!sock_owned_by_user(sk)) {
442 tcp_v4_mtu_reduced(sk);
443 } else {
444 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445 sock_hold(sk);
446 }
447 goto out;
448 }
449
450 err = icmp_err_convert[code].errno;
451 /* check if icmp_skb allows revert of backoff
452 * (see draft-zimmermann-tcp-lcd) */
453 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454 break;
455 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
456 !icsk->icsk_backoff || fastopen)
457 break;
458
459 if (sock_owned_by_user(sk))
460 break;
461
462 icsk->icsk_backoff--;
463 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464 TCP_TIMEOUT_INIT;
465 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466
467 skb = tcp_write_queue_head(sk);
468 BUG_ON(!skb);
469
470 remaining = icsk->icsk_rto -
471 min(icsk->icsk_rto,
472 tcp_time_stamp - tcp_skb_timestamp(skb));
473
474 if (remaining) {
475 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 remaining, TCP_RTO_MAX);
477 } else {
478 /* RTO revert clocked out retransmission.
479 * Will retransmit now */
480 tcp_retransmit_timer(sk);
481 }
482
483 break;
484 case ICMP_TIME_EXCEEDED:
485 err = EHOSTUNREACH;
486 break;
487 default:
488 goto out;
489 }
490
491 switch (sk->sk_state) {
492 case TCP_SYN_SENT:
493 case TCP_SYN_RECV:
494 		/* Only in fast or simultaneous open. If a fast open socket
495 		 * is already accepted it is treated as a connected one below.
496 */
497 if (fastopen && !fastopen->sk)
498 break;
499
500 if (!sock_owned_by_user(sk)) {
501 sk->sk_err = err;
502
503 sk->sk_error_report(sk);
504
505 tcp_done(sk);
506 } else {
507 sk->sk_err_soft = err;
508 }
509 goto out;
510 }
511
512 /* If we've already connected we will keep trying
513 * until we time out, or the user gives up.
514 *
515 	 * rfc1122 4.2.3.9 allows us to consider as hard errors
516 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
517 * but it is obsoleted by pmtu discovery).
518 *
519 	 * Note that in the modern internet, where routing is unreliable
520 	 * and in each dark corner broken firewalls sit, sending random
521 	 * errors ordered by their masters, even these two messages finally lose
522 * their original sense (even Linux sends invalid PORT_UNREACHs)
523 *
524 * Now we are in compliance with RFCs.
525 * --ANK (980905)
526 */
527
528 inet = inet_sk(sk);
529 if (!sock_owned_by_user(sk) && inet->recverr) {
530 sk->sk_err = err;
531 sk->sk_error_report(sk);
532 } else { /* Only an error on timeout */
533 sk->sk_err_soft = err;
534 }
535
536 out:
537 bh_unlock_sock(sk);
538 sock_put(sk);
539 }
540
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 struct tcphdr *th = tcp_hdr(skb);
544
545 if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 skb->csum_start = skb_transport_header(skb) - skb->head;
548 skb->csum_offset = offsetof(struct tcphdr, check);
549 } else {
550 th->check = tcp_v4_check(skb->len, saddr, daddr,
551 csum_partial(th,
552 th->doff << 2,
553 skb->csum));
554 }
555 }
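/* A standalone sketch of the IPv4/TCP pseudo-header checksum that
 * tcp_v4_check()/csum_partial() compute above, written with host-order
 * inputs and userspace types for clarity; "seg" covers the TCP header
 * plus payload with the check field zeroed.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t tcp4_csum_sketch(uint32_t saddr, uint32_t daddr,
				 const uint8_t *seg, size_t len)
{
	uint64_t sum = 0;
	size_t i;

	/* pseudo-header: saddr, daddr, zero-padded protocol, TCP length */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;			/* IPPROTO_TCP */
	sum += len;

	/* the TCP segment itself, as big-endian 16-bit words */
	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint64_t)seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += (uint64_t)seg[len - 1] << 8;

	while (sum >> 16)		/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}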
556
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 const struct inet_sock *inet = inet_sk(sk);
561
562 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565
566 /*
567 * This routine will send an RST to the other tcp.
568 *
569  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570  *		      for the reset.
571  *	Answer: if a packet caused an RST, it is not for a socket
572  *		existing in our system; if it is matched to a socket,
573  *		it is just a duplicate segment or a bug in the other side's TCP.
574  *		So we build the reply based only on the parameters
575  *		that arrived with the segment.
576 * Exception: precedence violation. We do not implement it in any case.
577 */
578
579 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
580 {
581 const struct tcphdr *th = tcp_hdr(skb);
582 struct {
583 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 } rep;
588 struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 struct tcp_md5sig_key *key;
591 const __u8 *hash_location = NULL;
592 unsigned char newhash[16];
593 int genhash;
594 struct sock *sk1 = NULL;
595 #endif
596 struct net *net;
597
598 /* Never send a reset in response to a reset. */
599 if (th->rst)
600 return;
601
602 /* If sk not NULL, it means we did a successful lookup and incoming
603 	 * route had to be correct. Prequeue might have dropped our dst.
604 */
605 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606 return;
607
608 /* Swap the send and the receive. */
609 memset(&rep, 0, sizeof(rep));
610 rep.th.dest = th->source;
611 rep.th.source = th->dest;
612 rep.th.doff = sizeof(struct tcphdr) / 4;
613 rep.th.rst = 1;
614
615 if (th->ack) {
616 rep.th.seq = th->ack_seq;
617 } else {
618 rep.th.ack = 1;
619 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 skb->len - (th->doff << 2));
621 }
622
623 memset(&arg, 0, sizeof(arg));
624 arg.iov[0].iov_base = (unsigned char *)&rep;
625 arg.iov[0].iov_len = sizeof(rep.th);
626
627 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 hash_location = tcp_parse_md5sig_option(th);
630 if (!sk && hash_location) {
631 /*
632 		 * the active side is gone. Try to find the listening socket
633 		 * through the source port, and then find the md5 key through
634 		 * that listening socket. We do not lose any security here:
635 		 * the incoming packet is checked against the key we find, and
636 		 * no RST is generated if the md5 hash doesn't match.
637 */
638 sk1 = __inet_lookup_listener(net,
639 &tcp_hashinfo, ip_hdr(skb)->saddr,
640 th->source, ip_hdr(skb)->daddr,
641 ntohs(th->source), inet_iif(skb));
642 		/* don't send an RST if we can't find the key */
643 if (!sk1)
644 return;
645 rcu_read_lock();
646 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 &ip_hdr(skb)->saddr, AF_INET);
648 if (!key)
649 goto release_sk1;
650
651 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 goto release_sk1;
654 } else {
655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 &ip_hdr(skb)->saddr,
657 AF_INET) : NULL;
658 }
659
660 if (key) {
661 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662 (TCPOPT_NOP << 16) |
663 (TCPOPT_MD5SIG << 8) |
664 TCPOLEN_MD5SIG);
665 /* Update length and the length the header thinks exists */
666 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 rep.th.doff = arg.iov[0].iov_len / 4;
668
669 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 key, ip_hdr(skb)->saddr,
671 ip_hdr(skb)->daddr, &rep.th);
672 }
673 #endif
674 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 ip_hdr(skb)->saddr, /* XXX */
676 arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 	/* When the socket is gone, all binding information is lost.
680 	 * Routing might fail in this case. There is no choice here: if we
681 	 * force the input interface, we will misroute on asymmetric routes.
682 */
683 if (sk)
684 arg.bound_dev_if = sk->sk_bound_dev_if;
685
686 arg.tos = ip_hdr(skb)->tos;
687 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 &arg, arg.iov[0].iov_len);
691
692 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697 if (sk1) {
698 rcu_read_unlock();
699 sock_put(sk1);
700 }
701 #endif
702 }
703
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706 */
707
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709 u32 win, u32 tsval, u32 tsecr, int oif,
710 struct tcp_md5sig_key *key,
711 int reply_flags, u8 tos)
712 {
713 const struct tcphdr *th = tcp_hdr(skb);
714 struct {
715 struct tcphdr th;
716 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720 ];
721 } rep;
722 struct ip_reply_arg arg;
723 struct net *net = dev_net(skb_dst(skb)->dev);
724
725 memset(&rep.th, 0, sizeof(struct tcphdr));
726 memset(&arg, 0, sizeof(arg));
727
728 arg.iov[0].iov_base = (unsigned char *)&rep;
729 arg.iov[0].iov_len = sizeof(rep.th);
730 if (tsecr) {
731 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732 (TCPOPT_TIMESTAMP << 8) |
733 TCPOLEN_TIMESTAMP);
734 rep.opt[1] = htonl(tsval);
735 rep.opt[2] = htonl(tsecr);
736 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 }
738
739 /* Swap the send and the receive. */
740 rep.th.dest = th->source;
741 rep.th.source = th->dest;
742 rep.th.doff = arg.iov[0].iov_len / 4;
743 rep.th.seq = htonl(seq);
744 rep.th.ack_seq = htonl(ack);
745 rep.th.ack = 1;
746 rep.th.window = htons(win);
747
748 #ifdef CONFIG_TCP_MD5SIG
749 if (key) {
750 int offset = (tsecr) ? 3 : 0;
751
752 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753 (TCPOPT_NOP << 16) |
754 (TCPOPT_MD5SIG << 8) |
755 TCPOLEN_MD5SIG);
756 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 rep.th.doff = arg.iov[0].iov_len/4;
758
759 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 key, ip_hdr(skb)->saddr,
761 ip_hdr(skb)->daddr, &rep.th);
762 }
763 #endif
764 arg.flags = reply_flags;
765 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 ip_hdr(skb)->saddr, /* XXX */
767 arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769 if (oif)
770 arg.bound_dev_if = oif;
771 arg.tos = tos;
772 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 &arg, arg.iov[0].iov_len);
776
777 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782 struct inet_timewait_sock *tw = inet_twsk(sk);
783 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784
785 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787 tcp_time_stamp + tcptw->tw_ts_offset,
788 tcptw->tw_ts_recent,
789 tw->tw_bound_dev_if,
790 tcp_twsk_md5_key(tcptw),
791 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792 tw->tw_tos
793 );
794
795 inet_twsk_put(tw);
796 }
797
798 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
799 struct request_sock *req)
800 {
801 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803 */
804 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807 tcp_time_stamp,
808 req->ts_recent,
809 0,
810 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811 AF_INET),
812 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813 ip_hdr(skb)->tos);
814 }
815
816 /*
817 * Send a SYN-ACK after having received a SYN.
818 * This still operates on a request_sock only, not on a big
819 * socket.
820 */
821 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
822 struct flowi *fl,
823 struct request_sock *req,
824 u16 queue_mapping,
825 struct tcp_fastopen_cookie *foc,
826 bool attach_req)
827 {
828 const struct inet_request_sock *ireq = inet_rsk(req);
829 struct flowi4 fl4;
830 int err = -1;
831 struct sk_buff *skb;
832
833 /* First, grab a route. */
834 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
835 return -1;
836
837 skb = tcp_make_synack(sk, dst, req, foc, attach_req);
838
839 if (skb) {
840 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
841
842 skb_set_queue_mapping(skb, queue_mapping);
843 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
844 ireq->ir_rmt_addr,
845 ireq->opt);
846 err = net_xmit_eval(err);
847 }
848
849 return err;
850 }
851
852 /*
853 * IPv4 request_sock destructor.
854 */
855 static void tcp_v4_reqsk_destructor(struct request_sock *req)
856 {
857 kfree(inet_rsk(req)->opt);
858 }
859
860
861 #ifdef CONFIG_TCP_MD5SIG
862 /*
863 * RFC2385 MD5 checksumming requires a mapping of
864 * IP address->MD5 Key.
865 * We need to maintain these in the sk structure.
866 */
867
868 /* Find the Key structure for an address. */
869 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
870 const union tcp_md5_addr *addr,
871 int family)
872 {
873 const struct tcp_sock *tp = tcp_sk(sk);
874 struct tcp_md5sig_key *key;
875 unsigned int size = sizeof(struct in_addr);
876 const struct tcp_md5sig_info *md5sig;
877
878 /* caller either holds rcu_read_lock() or socket lock */
879 md5sig = rcu_dereference_check(tp->md5sig_info,
880 sock_owned_by_user(sk) ||
881 lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
882 if (!md5sig)
883 return NULL;
884 #if IS_ENABLED(CONFIG_IPV6)
885 if (family == AF_INET6)
886 size = sizeof(struct in6_addr);
887 #endif
888 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
889 if (key->family != family)
890 continue;
891 if (!memcmp(&key->addr, addr, size))
892 return key;
893 }
894 return NULL;
895 }
896 EXPORT_SYMBOL(tcp_md5_do_lookup);
897
898 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
899 const struct sock *addr_sk)
900 {
901 const union tcp_md5_addr *addr;
902
903 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
904 return tcp_md5_do_lookup(sk, addr, AF_INET);
905 }
906 EXPORT_SYMBOL(tcp_v4_md5_lookup);
907
908 /* This can be called on a newly created socket, from other files */
909 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
910 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
911 {
912 /* Add Key to the list */
913 struct tcp_md5sig_key *key;
914 struct tcp_sock *tp = tcp_sk(sk);
915 struct tcp_md5sig_info *md5sig;
916
917 key = tcp_md5_do_lookup(sk, addr, family);
918 if (key) {
919 /* Pre-existing entry - just update that one. */
920 memcpy(key->key, newkey, newkeylen);
921 key->keylen = newkeylen;
922 return 0;
923 }
924
925 md5sig = rcu_dereference_protected(tp->md5sig_info,
926 sock_owned_by_user(sk));
927 if (!md5sig) {
928 md5sig = kmalloc(sizeof(*md5sig), gfp);
929 if (!md5sig)
930 return -ENOMEM;
931
932 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
933 INIT_HLIST_HEAD(&md5sig->head);
934 rcu_assign_pointer(tp->md5sig_info, md5sig);
935 }
936
937 key = sock_kmalloc(sk, sizeof(*key), gfp);
938 if (!key)
939 return -ENOMEM;
940 if (!tcp_alloc_md5sig_pool()) {
941 sock_kfree_s(sk, key, sizeof(*key));
942 return -ENOMEM;
943 }
944
945 memcpy(key->key, newkey, newkeylen);
946 key->keylen = newkeylen;
947 key->family = family;
948 memcpy(&key->addr, addr,
949 (family == AF_INET6) ? sizeof(struct in6_addr) :
950 sizeof(struct in_addr));
951 hlist_add_head_rcu(&key->node, &md5sig->head);
952 return 0;
953 }
954 EXPORT_SYMBOL(tcp_md5_do_add);
955
956 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
957 {
958 struct tcp_md5sig_key *key;
959
960 key = tcp_md5_do_lookup(sk, addr, family);
961 if (!key)
962 return -ENOENT;
963 hlist_del_rcu(&key->node);
964 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
965 kfree_rcu(key, rcu);
966 return 0;
967 }
968 EXPORT_SYMBOL(tcp_md5_do_del);
969
970 static void tcp_clear_md5_list(struct sock *sk)
971 {
972 struct tcp_sock *tp = tcp_sk(sk);
973 struct tcp_md5sig_key *key;
974 struct hlist_node *n;
975 struct tcp_md5sig_info *md5sig;
976
977 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
978
979 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
980 hlist_del_rcu(&key->node);
981 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
982 kfree_rcu(key, rcu);
983 }
984 }
985
986 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
987 int optlen)
988 {
989 struct tcp_md5sig cmd;
990 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
991
992 if (optlen < sizeof(cmd))
993 return -EINVAL;
994
995 if (copy_from_user(&cmd, optval, sizeof(cmd)))
996 return -EFAULT;
997
998 if (sin->sin_family != AF_INET)
999 return -EINVAL;
1000
1001 if (!cmd.tcpm_keylen)
1002 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1003 AF_INET);
1004
1005 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1006 return -EINVAL;
1007
1008 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1009 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1010 GFP_KERNEL);
1011 }
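/* For reference, setting the key from userspace; TCP_MD5SIG and
 * struct tcp_md5sig come from <netinet/tcp.h> (or <linux/tcp.h>).
 * "192.0.2.1" and the key value used with this helper are placeholders.
 */
#include <arpa/inet.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const char *peer_ip, const char *key)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
	size_t keylen = strlen(key);

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	if (inet_pton(AF_INET, peer_ip, &sin->sin_addr) != 1)
		return -1;
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}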
1012
1013 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1014 __be32 daddr, __be32 saddr, int nbytes)
1015 {
1016 struct tcp4_pseudohdr *bp;
1017 struct scatterlist sg;
1018
1019 bp = &hp->md5_blk.ip4;
1020
1021 /*
1022 * 1. the TCP pseudo-header (in the order: source IP address,
1023 * destination IP address, zero-padded protocol number, and
1024 * segment length)
1025 */
1026 bp->saddr = saddr;
1027 bp->daddr = daddr;
1028 bp->pad = 0;
1029 bp->protocol = IPPROTO_TCP;
1030 bp->len = cpu_to_be16(nbytes);
1031
1032 sg_init_one(&sg, bp, sizeof(*bp));
1033 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1034 }
1035
1036 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1037 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1038 {
1039 struct tcp_md5sig_pool *hp;
1040 struct hash_desc *desc;
1041
1042 hp = tcp_get_md5sig_pool();
1043 if (!hp)
1044 goto clear_hash_noput;
1045 desc = &hp->md5_desc;
1046
1047 if (crypto_hash_init(desc))
1048 goto clear_hash;
1049 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1050 goto clear_hash;
1051 if (tcp_md5_hash_header(hp, th))
1052 goto clear_hash;
1053 if (tcp_md5_hash_key(hp, key))
1054 goto clear_hash;
1055 if (crypto_hash_final(desc, md5_hash))
1056 goto clear_hash;
1057
1058 tcp_put_md5sig_pool();
1059 return 0;
1060
1061 clear_hash:
1062 tcp_put_md5sig_pool();
1063 clear_hash_noput:
1064 memset(md5_hash, 0, 16);
1065 return 1;
1066 }
1067
1068 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1069 const struct sock *sk,
1070 const struct sk_buff *skb)
1071 {
1072 struct tcp_md5sig_pool *hp;
1073 struct hash_desc *desc;
1074 const struct tcphdr *th = tcp_hdr(skb);
1075 __be32 saddr, daddr;
1076
1077 if (sk) { /* valid for establish/request sockets */
1078 saddr = sk->sk_rcv_saddr;
1079 daddr = sk->sk_daddr;
1080 } else {
1081 const struct iphdr *iph = ip_hdr(skb);
1082 saddr = iph->saddr;
1083 daddr = iph->daddr;
1084 }
1085
1086 hp = tcp_get_md5sig_pool();
1087 if (!hp)
1088 goto clear_hash_noput;
1089 desc = &hp->md5_desc;
1090
1091 if (crypto_hash_init(desc))
1092 goto clear_hash;
1093
1094 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1095 goto clear_hash;
1096 if (tcp_md5_hash_header(hp, th))
1097 goto clear_hash;
1098 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1099 goto clear_hash;
1100 if (tcp_md5_hash_key(hp, key))
1101 goto clear_hash;
1102 if (crypto_hash_final(desc, md5_hash))
1103 goto clear_hash;
1104
1105 tcp_put_md5sig_pool();
1106 return 0;
1107
1108 clear_hash:
1109 tcp_put_md5sig_pool();
1110 clear_hash_noput:
1111 memset(md5_hash, 0, 16);
1112 return 1;
1113 }
1114 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1115
1116 #endif
1117
1118 /* Called with rcu_read_lock() */
1119 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1120 const struct sk_buff *skb)
1121 {
1122 #ifdef CONFIG_TCP_MD5SIG
1123 /*
1124 * This gets called for each TCP segment that arrives
1125 * so we want to be efficient.
1126 * We have 3 drop cases:
1127 * o No MD5 hash and one expected.
1128 * o MD5 hash and we're not expecting one.
1129 	 * o MD5 hash and it's wrong.
1130 */
1131 const __u8 *hash_location = NULL;
1132 struct tcp_md5sig_key *hash_expected;
1133 const struct iphdr *iph = ip_hdr(skb);
1134 const struct tcphdr *th = tcp_hdr(skb);
1135 int genhash;
1136 unsigned char newhash[16];
1137
1138 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1139 AF_INET);
1140 hash_location = tcp_parse_md5sig_option(th);
1141
1142 /* We've parsed the options - do we have a hash? */
1143 if (!hash_expected && !hash_location)
1144 return false;
1145
1146 if (hash_expected && !hash_location) {
1147 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1148 return true;
1149 }
1150
1151 if (!hash_expected && hash_location) {
1152 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1153 return true;
1154 }
1155
1156 /* Okay, so this is hash_expected and hash_location -
1157 * so we need to calculate the checksum.
1158 */
1159 genhash = tcp_v4_md5_hash_skb(newhash,
1160 hash_expected,
1161 NULL, skb);
1162
1163 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1164 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1165 &iph->saddr, ntohs(th->source),
1166 &iph->daddr, ntohs(th->dest),
1167 genhash ? " tcp_v4_calc_md5_hash failed"
1168 : "");
1169 return true;
1170 }
1171 return false;
1172 #endif
1173 return false;
1174 }
1175
1176 static void tcp_v4_init_req(struct request_sock *req,
1177 const struct sock *sk_listener,
1178 struct sk_buff *skb)
1179 {
1180 struct inet_request_sock *ireq = inet_rsk(req);
1181
1182 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1183 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1184 ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1185 ireq->opt = tcp_v4_save_options(skb);
1186 }
1187
1188 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1189 struct flowi *fl,
1190 const struct request_sock *req,
1191 bool *strict)
1192 {
1193 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1194
1195 if (strict) {
1196 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1197 *strict = true;
1198 else
1199 *strict = false;
1200 }
1201
1202 return dst;
1203 }
1204
1205 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1206 .family = PF_INET,
1207 .obj_size = sizeof(struct tcp_request_sock),
1208 .rtx_syn_ack = tcp_rtx_synack,
1209 .send_ack = tcp_v4_reqsk_send_ack,
1210 .destructor = tcp_v4_reqsk_destructor,
1211 .send_reset = tcp_v4_send_reset,
1212 .syn_ack_timeout = tcp_syn_ack_timeout,
1213 };
1214
1215 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1216 .mss_clamp = TCP_MSS_DEFAULT,
1217 #ifdef CONFIG_TCP_MD5SIG
1218 .req_md5_lookup = tcp_v4_md5_lookup,
1219 .calc_md5_hash = tcp_v4_md5_hash_skb,
1220 #endif
1221 .init_req = tcp_v4_init_req,
1222 #ifdef CONFIG_SYN_COOKIES
1223 .cookie_init_seq = cookie_v4_init_sequence,
1224 #endif
1225 .route_req = tcp_v4_route_req,
1226 .init_seq = tcp_v4_init_sequence,
1227 .send_synack = tcp_v4_send_synack,
1228 };
1229
1230 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1231 {
1232 	/* Never answer to SYNs sent to broadcast or multicast */
1233 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1234 goto drop;
1235
1236 return tcp_conn_request(&tcp_request_sock_ops,
1237 &tcp_request_sock_ipv4_ops, sk, skb);
1238
1239 drop:
1240 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1241 return 0;
1242 }
1243 EXPORT_SYMBOL(tcp_v4_conn_request);
1244
1245
1246 /*
1247  * The three-way handshake has completed - we got a valid ACK -
1248 * now create the new socket.
1249 */
1250 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1251 struct request_sock *req,
1252 struct dst_entry *dst)
1253 {
1254 struct inet_request_sock *ireq;
1255 struct inet_sock *newinet;
1256 struct tcp_sock *newtp;
1257 struct sock *newsk;
1258 #ifdef CONFIG_TCP_MD5SIG
1259 struct tcp_md5sig_key *key;
1260 #endif
1261 struct ip_options_rcu *inet_opt;
1262
1263 if (sk_acceptq_is_full(sk))
1264 goto exit_overflow;
1265
1266 newsk = tcp_create_openreq_child(sk, req, skb);
1267 if (!newsk)
1268 goto exit_nonewsk;
1269
1270 newsk->sk_gso_type = SKB_GSO_TCPV4;
1271 inet_sk_rx_dst_set(newsk, skb);
1272
1273 newtp = tcp_sk(newsk);
1274 newinet = inet_sk(newsk);
1275 ireq = inet_rsk(req);
1276 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1277 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1278 newinet->inet_saddr = ireq->ir_loc_addr;
1279 inet_opt = ireq->opt;
1280 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1281 ireq->opt = NULL;
1282 newinet->mc_index = inet_iif(skb);
1283 newinet->mc_ttl = ip_hdr(skb)->ttl;
1284 newinet->rcv_tos = ip_hdr(skb)->tos;
1285 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1286 if (inet_opt)
1287 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1288 newinet->inet_id = newtp->write_seq ^ jiffies;
1289
1290 if (!dst) {
1291 dst = inet_csk_route_child_sock(sk, newsk, req);
1292 if (!dst)
1293 goto put_and_exit;
1294 } else {
1295 /* syncookie case : see end of cookie_v4_check() */
1296 }
1297 sk_setup_caps(newsk, dst);
1298
1299 tcp_ca_openreq_child(newsk, dst);
1300
1301 tcp_sync_mss(newsk, dst_mtu(dst));
1302 newtp->advmss = dst_metric_advmss(dst);
1303 if (tcp_sk(sk)->rx_opt.user_mss &&
1304 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1305 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1306
1307 tcp_initialize_rcv_mss(newsk);
1308
1309 #ifdef CONFIG_TCP_MD5SIG
1310 /* Copy over the MD5 key from the original socket */
1311 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1312 AF_INET);
1313 if (key) {
1314 /*
1315 * We're using one, so create a matching key
1316 * on the newsk structure. If we fail to get
1317 * memory, then we end up not copying the key
1318 * across. Shucks.
1319 */
1320 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1321 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1322 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1323 }
1324 #endif
1325
1326 if (__inet_inherit_port(sk, newsk) < 0)
1327 goto put_and_exit;
1328 __inet_hash_nolisten(newsk, NULL);
1329
1330 return newsk;
1331
1332 exit_overflow:
1333 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1334 exit_nonewsk:
1335 dst_release(dst);
1336 exit:
1337 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1338 return NULL;
1339 put_and_exit:
1340 inet_csk_prepare_forced_close(newsk);
1341 tcp_done(newsk);
1342 goto exit;
1343 }
1344 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1345
1346 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1347 {
1348 #ifdef CONFIG_SYN_COOKIES
1349 const struct tcphdr *th = tcp_hdr(skb);
1350
1351 if (!th->syn)
1352 sk = cookie_v4_check(sk, skb);
1353 #endif
1354 return sk;
1355 }
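/* A toy sketch of the syncookie idea checked above -- NOT the kernel's
 * exact construction (which uses keyed cryptographic hashes, see
 * syncookies.c): the ISN we send encodes a MAC over the 4-tuple plus a
 * coarse timestamp and an MSS table index, so a later ACK can be
 * validated statelessly.  mix() is a stand-in, non-cryptographic mixer.
 */
#include <stdint.h>

static uint32_t mix(uint32_t x)
{
	x ^= x >> 16; x *= 0x7feb352d;
	x ^= x >> 15; x *= 0x846ca68b;
	x ^= x >> 16;
	return x;
}

static uint32_t toy_cookie(uint32_t saddr, uint32_t daddr, uint32_t ports,
			   uint32_t minute, uint32_t mss_idx)
{
	uint32_t mac;

	minute &= 0x3f;
	mac = mix(saddr ^ mix(daddr ^ mix(ports ^ minute)));

	/* top 24 bits: MAC; next 6 bits: timestamp; low 2 bits: MSS index */
	return (mac & 0xffffff00) | (minute << 2) | (mss_idx & 0x3);
}

static int toy_cookie_check(uint32_t cookie, uint32_t saddr, uint32_t daddr,
			    uint32_t ports, uint32_t now_minute)
{
	uint32_t minute = (cookie >> 2) & 0x3f;
	uint32_t mss_idx = cookie & 0x3;

	if (((now_minute - minute) & 0x3f) > 1)
		return -1;			/* cookie too old */
	if (cookie != toy_cookie(saddr, daddr, ports, minute, mss_idx))
		return -1;			/* MAC mismatch */
	return (int)mss_idx;
}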
1356
1357 /* The socket must have its spinlock held when we get
1358 * here, unless it is a TCP_LISTEN socket.
1359 *
1360 * We have a potential double-lock case here, so even when
1361 * doing backlog processing we use the BH locking scheme.
1362 * This is because we cannot sleep with the original spinlock
1363 * held.
1364 */
1365 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1366 {
1367 struct sock *rsk;
1368
1369 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1370 struct dst_entry *dst = sk->sk_rx_dst;
1371
1372 sock_rps_save_rxhash(sk, skb);
1373 sk_mark_napi_id(sk, skb);
1374 if (dst) {
1375 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1376 !dst->ops->check(dst, 0)) {
1377 dst_release(dst);
1378 sk->sk_rx_dst = NULL;
1379 }
1380 }
1381 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1382 return 0;
1383 }
1384
1385 if (tcp_checksum_complete(skb))
1386 goto csum_err;
1387
1388 if (sk->sk_state == TCP_LISTEN) {
1389 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1390
1391 if (!nsk)
1392 goto discard;
1393 if (nsk != sk) {
1394 sock_rps_save_rxhash(nsk, skb);
1395 sk_mark_napi_id(nsk, skb);
1396 if (tcp_child_process(sk, nsk, skb)) {
1397 rsk = nsk;
1398 goto reset;
1399 }
1400 return 0;
1401 }
1402 } else
1403 sock_rps_save_rxhash(sk, skb);
1404
1405 if (tcp_rcv_state_process(sk, skb)) {
1406 rsk = sk;
1407 goto reset;
1408 }
1409 return 0;
1410
1411 reset:
1412 tcp_v4_send_reset(rsk, skb);
1413 discard:
1414 kfree_skb(skb);
1415 /* Be careful here. If this function gets more complicated and
1416 * gcc suffers from register pressure on the x86, sk (in %ebx)
1417 * might be destroyed here. This current version compiles correctly,
1418 * but you have been warned.
1419 */
1420 return 0;
1421
1422 csum_err:
1423 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1424 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1425 goto discard;
1426 }
1427 EXPORT_SYMBOL(tcp_v4_do_rcv);
1428
1429 void tcp_v4_early_demux(struct sk_buff *skb)
1430 {
1431 const struct iphdr *iph;
1432 const struct tcphdr *th;
1433 struct sock *sk;
1434
1435 if (skb->pkt_type != PACKET_HOST)
1436 return;
1437
1438 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1439 return;
1440
1441 iph = ip_hdr(skb);
1442 th = tcp_hdr(skb);
1443
1444 if (th->doff < sizeof(struct tcphdr) / 4)
1445 return;
1446
1447 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1448 iph->saddr, th->source,
1449 iph->daddr, ntohs(th->dest),
1450 skb->skb_iif);
1451 if (sk) {
1452 skb->sk = sk;
1453 skb->destructor = sock_edemux;
1454 if (sk_fullsock(sk)) {
1455 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1456
1457 if (dst)
1458 dst = dst_check(dst, 0);
1459 if (dst &&
1460 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1461 skb_dst_set_noref(skb, dst);
1462 }
1463 }
1464 }
1465
1466 /* Packet is added to VJ-style prequeue for processing in process
1467 * context, if a reader task is waiting. Apparently, this exciting
1468 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1469 * failed somewhere. Latency? Burstiness? Well, at least now we will
1470  * see why it failed. 8)8)				  --ANK
1471 *
1472 */
1473 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1474 {
1475 struct tcp_sock *tp = tcp_sk(sk);
1476
1477 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1478 return false;
1479
1480 if (skb->len <= tcp_hdrlen(skb) &&
1481 skb_queue_len(&tp->ucopy.prequeue) == 0)
1482 return false;
1483
1484 /* Before escaping RCU protected region, we need to take care of skb
1485 * dst. Prequeue is only enabled for established sockets.
1486 	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1487 	 * Instead of doing a full sk_rx_dst validity check here, let's perform
1488 * an optimistic check.
1489 */
1490 if (likely(sk->sk_rx_dst))
1491 skb_dst_drop(skb);
1492 else
1493 skb_dst_force(skb);
1494
1495 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1496 tp->ucopy.memory += skb->truesize;
1497 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1498 struct sk_buff *skb1;
1499
1500 BUG_ON(sock_owned_by_user(sk));
1501
1502 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1503 sk_backlog_rcv(sk, skb1);
1504 NET_INC_STATS_BH(sock_net(sk),
1505 LINUX_MIB_TCPPREQUEUEDROPPED);
1506 }
1507
1508 tp->ucopy.memory = 0;
1509 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1510 wake_up_interruptible_sync_poll(sk_sleep(sk),
1511 POLLIN | POLLRDNORM | POLLRDBAND);
1512 if (!inet_csk_ack_scheduled(sk))
1513 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1514 (3 * tcp_rto_min(sk)) / 4,
1515 TCP_RTO_MAX);
1516 }
1517 return true;
1518 }
1519 EXPORT_SYMBOL(tcp_prequeue);
1520
1521 /*
1522 * From tcp_input.c
1523 */
1524
1525 int tcp_v4_rcv(struct sk_buff *skb)
1526 {
1527 const struct iphdr *iph;
1528 const struct tcphdr *th;
1529 struct sock *sk;
1530 int ret;
1531 struct net *net = dev_net(skb->dev);
1532
1533 if (skb->pkt_type != PACKET_HOST)
1534 goto discard_it;
1535
1536 /* Count it even if it's bad */
1537 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1538
1539 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1540 goto discard_it;
1541
1542 th = tcp_hdr(skb);
1543
1544 if (th->doff < sizeof(struct tcphdr) / 4)
1545 goto bad_packet;
1546 if (!pskb_may_pull(skb, th->doff * 4))
1547 goto discard_it;
1548
1549 /* An explanation is required here, I think.
1550 * Packet length and doff are validated by header prediction,
1551 	 * provided the case of th->doff==0 is eliminated.
1552 * So, we defer the checks. */
1553
1554 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1555 goto csum_error;
1556
1557 th = tcp_hdr(skb);
1558 iph = ip_hdr(skb);
1559 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1560 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1561 */
1562 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1563 sizeof(struct inet_skb_parm));
1564 barrier();
1565
1566 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1567 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1568 skb->len - th->doff * 4);
1569 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1570 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1571 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1572 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1573 TCP_SKB_CB(skb)->sacked = 0;
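	/* SYN and FIN each consume one unit of sequence space, so for a
	 * bare SYN (skb->len == th->doff * 4) end_seq is seq + 1, while a
	 * segment carrying 100 bytes of data gives end_seq = seq + 100.
	 */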
1574
1575 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1576 if (!sk)
1577 goto no_tcp_socket;
1578
1579 process:
1580 if (sk->sk_state == TCP_TIME_WAIT)
1581 goto do_time_wait;
1582
1583 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1584 struct request_sock *req = inet_reqsk(sk);
1585 struct sock *nsk = NULL;
1586
1587 sk = req->rsk_listener;
1588 if (tcp_v4_inbound_md5_hash(sk, skb))
1589 goto discard_and_relse;
1590 if (sk->sk_state == TCP_LISTEN)
1591 nsk = tcp_check_req(sk, skb, req, false);
1592 if (!nsk) {
1593 reqsk_put(req);
1594 goto discard_it;
1595 }
1596 if (nsk == sk) {
1597 sock_hold(sk);
1598 reqsk_put(req);
1599 } else if (tcp_child_process(sk, nsk, skb)) {
1600 tcp_v4_send_reset(nsk, skb);
1601 goto discard_it;
1602 } else {
1603 return 0;
1604 }
1605 }
1606 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1607 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1608 goto discard_and_relse;
1609 }
1610
1611 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1612 goto discard_and_relse;
1613
1614 if (tcp_v4_inbound_md5_hash(sk, skb))
1615 goto discard_and_relse;
1616
1617 nf_reset(skb);
1618
1619 if (sk_filter(sk, skb))
1620 goto discard_and_relse;
1621
1622 skb->dev = NULL;
1623
1624 if (sk->sk_state == TCP_LISTEN) {
1625 ret = tcp_v4_do_rcv(sk, skb);
1626 goto put_and_return;
1627 }
1628
1629 sk_incoming_cpu_update(sk);
1630
1631 bh_lock_sock_nested(sk);
1632 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1633 ret = 0;
1634 if (!sock_owned_by_user(sk)) {
1635 if (!tcp_prequeue(sk, skb))
1636 ret = tcp_v4_do_rcv(sk, skb);
1637 } else if (unlikely(sk_add_backlog(sk, skb,
1638 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1639 bh_unlock_sock(sk);
1640 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1641 goto discard_and_relse;
1642 }
1643 bh_unlock_sock(sk);
1644
1645 put_and_return:
1646 sock_put(sk);
1647
1648 return ret;
1649
1650 no_tcp_socket:
1651 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1652 goto discard_it;
1653
1654 if (tcp_checksum_complete(skb)) {
1655 csum_error:
1656 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1657 bad_packet:
1658 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1659 } else {
1660 tcp_v4_send_reset(NULL, skb);
1661 }
1662
1663 discard_it:
1664 /* Discard frame. */
1665 kfree_skb(skb);
1666 return 0;
1667
1668 discard_and_relse:
1669 sock_put(sk);
1670 goto discard_it;
1671
1672 do_time_wait:
1673 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1674 inet_twsk_put(inet_twsk(sk));
1675 goto discard_it;
1676 }
1677
1678 if (tcp_checksum_complete(skb)) {
1679 inet_twsk_put(inet_twsk(sk));
1680 goto csum_error;
1681 }
1682 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1683 case TCP_TW_SYN: {
1684 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1685 &tcp_hashinfo,
1686 iph->saddr, th->source,
1687 iph->daddr, th->dest,
1688 inet_iif(skb));
1689 if (sk2) {
1690 inet_twsk_deschedule_put(inet_twsk(sk));
1691 sk = sk2;
1692 goto process;
1693 }
1694 /* Fall through to ACK */
1695 }
1696 case TCP_TW_ACK:
1697 tcp_v4_timewait_ack(sk, skb);
1698 break;
1699 case TCP_TW_RST:
1700 goto no_tcp_socket;
1701 case TCP_TW_SUCCESS:;
1702 }
1703 goto discard_it;
1704 }
1705
1706 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1707 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1708 .twsk_unique = tcp_twsk_unique,
1709 .twsk_destructor= tcp_twsk_destructor,
1710 };
1711
1712 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1713 {
1714 struct dst_entry *dst = skb_dst(skb);
1715
1716 if (dst) {
1717 dst_hold(dst);
1718 sk->sk_rx_dst = dst;
1719 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1720 }
1721 }
1722 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1723
1724 const struct inet_connection_sock_af_ops ipv4_specific = {
1725 .queue_xmit = ip_queue_xmit,
1726 .send_check = tcp_v4_send_check,
1727 .rebuild_header = inet_sk_rebuild_header,
1728 .sk_rx_dst_set = inet_sk_rx_dst_set,
1729 .conn_request = tcp_v4_conn_request,
1730 .syn_recv_sock = tcp_v4_syn_recv_sock,
1731 .net_header_len = sizeof(struct iphdr),
1732 .setsockopt = ip_setsockopt,
1733 .getsockopt = ip_getsockopt,
1734 .addr2sockaddr = inet_csk_addr2sockaddr,
1735 .sockaddr_len = sizeof(struct sockaddr_in),
1736 .bind_conflict = inet_csk_bind_conflict,
1737 #ifdef CONFIG_COMPAT
1738 .compat_setsockopt = compat_ip_setsockopt,
1739 .compat_getsockopt = compat_ip_getsockopt,
1740 #endif
1741 .mtu_reduced = tcp_v4_mtu_reduced,
1742 };
1743 EXPORT_SYMBOL(ipv4_specific);
1744
1745 #ifdef CONFIG_TCP_MD5SIG
1746 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1747 .md5_lookup = tcp_v4_md5_lookup,
1748 .calc_md5_hash = tcp_v4_md5_hash_skb,
1749 .md5_parse = tcp_v4_parse_md5_keys,
1750 };
1751 #endif
1752
1753 /* NOTE: A lot of things are set to zero explicitly by the call to
1754  * sk_alloc(), so they need not be done here.
1755 */
1756 static int tcp_v4_init_sock(struct sock *sk)
1757 {
1758 struct inet_connection_sock *icsk = inet_csk(sk);
1759
1760 tcp_init_sock(sk);
1761
1762 icsk->icsk_af_ops = &ipv4_specific;
1763
1764 #ifdef CONFIG_TCP_MD5SIG
1765 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1766 #endif
1767
1768 return 0;
1769 }
1770
1771 void tcp_v4_destroy_sock(struct sock *sk)
1772 {
1773 struct tcp_sock *tp = tcp_sk(sk);
1774
1775 tcp_clear_xmit_timers(sk);
1776
1777 tcp_cleanup_congestion_control(sk);
1778
1779 	/* Clean up the write buffer. */
1780 tcp_write_queue_purge(sk);
1781
1782 /* Cleans up our, hopefully empty, out_of_order_queue. */
1783 __skb_queue_purge(&tp->out_of_order_queue);
1784
1785 #ifdef CONFIG_TCP_MD5SIG
1786 /* Clean up the MD5 key list, if any */
1787 if (tp->md5sig_info) {
1788 tcp_clear_md5_list(sk);
1789 kfree_rcu(tp->md5sig_info, rcu);
1790 tp->md5sig_info = NULL;
1791 }
1792 #endif
1793
1794 	/* Clean the prequeue; it really must be empty */
1795 __skb_queue_purge(&tp->ucopy.prequeue);
1796
1797 /* Clean up a referenced TCP bind bucket. */
1798 if (inet_csk(sk)->icsk_bind_hash)
1799 inet_put_port(sk);
1800
1801 BUG_ON(tp->fastopen_rsk);
1802
1803 /* If socket is aborted during connect operation */
1804 tcp_free_fastopen_req(tp);
1805 tcp_saved_syn_free(tp);
1806
1807 sk_sockets_allocated_dec(sk);
1808 sock_release_memcg(sk);
1809 }
1810 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1811
1812 #ifdef CONFIG_PROC_FS
1813 /* Proc filesystem TCP sock list dumping. */
1814
1815 /*
1816  * Get the next listener socket following cur. If cur is NULL, get the
1817  * first socket starting from the bucket given in st->bucket; when
1818  * st->bucket is zero the very first socket in the hash table is returned.
1819 */
1820 static void *listening_get_next(struct seq_file *seq, void *cur)
1821 {
1822 struct inet_connection_sock *icsk;
1823 struct hlist_nulls_node *node;
1824 struct sock *sk = cur;
1825 struct inet_listen_hashbucket *ilb;
1826 struct tcp_iter_state *st = seq->private;
1827 struct net *net = seq_file_net(seq);
1828
1829 if (!sk) {
1830 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1831 spin_lock_bh(&ilb->lock);
1832 sk = sk_nulls_head(&ilb->head);
1833 st->offset = 0;
1834 goto get_sk;
1835 }
1836 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1837 ++st->num;
1838 ++st->offset;
1839
1840 sk = sk_nulls_next(sk);
1841 get_sk:
1842 sk_nulls_for_each_from(sk, node) {
1843 if (!net_eq(sock_net(sk), net))
1844 continue;
1845 if (sk->sk_family == st->family) {
1846 cur = sk;
1847 goto out;
1848 }
1849 icsk = inet_csk(sk);
1850 }
1851 spin_unlock_bh(&ilb->lock);
1852 st->offset = 0;
1853 if (++st->bucket < INET_LHTABLE_SIZE) {
1854 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1855 spin_lock_bh(&ilb->lock);
1856 sk = sk_nulls_head(&ilb->head);
1857 goto get_sk;
1858 }
1859 cur = NULL;
1860 out:
1861 return cur;
1862 }
1863
1864 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1865 {
1866 struct tcp_iter_state *st = seq->private;
1867 void *rc;
1868
1869 st->bucket = 0;
1870 st->offset = 0;
1871 rc = listening_get_next(seq, NULL);
1872
1873 while (rc && *pos) {
1874 rc = listening_get_next(seq, rc);
1875 --*pos;
1876 }
1877 return rc;
1878 }
1879
1880 static inline bool empty_bucket(const struct tcp_iter_state *st)
1881 {
1882 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1883 }
1884
1885 /*
1886 * Get first established socket starting from bucket given in st->bucket.
1887 * If st->bucket is zero, the very first socket in the hash is returned.
1888 */
1889 static void *established_get_first(struct seq_file *seq)
1890 {
1891 struct tcp_iter_state *st = seq->private;
1892 struct net *net = seq_file_net(seq);
1893 void *rc = NULL;
1894
1895 st->offset = 0;
1896 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1897 struct sock *sk;
1898 struct hlist_nulls_node *node;
1899 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1900
1901 /* Lockless fast path for the common case of empty buckets */
1902 if (empty_bucket(st))
1903 continue;
1904
1905 spin_lock_bh(lock);
1906 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1907 if (sk->sk_family != st->family ||
1908 !net_eq(sock_net(sk), net)) {
1909 continue;
1910 }
1911 rc = sk;
1912 goto out;
1913 }
1914 spin_unlock_bh(lock);
1915 }
1916 out:
1917 return rc;
1918 }
1919
1920 static void *established_get_next(struct seq_file *seq, void *cur)
1921 {
1922 struct sock *sk = cur;
1923 struct hlist_nulls_node *node;
1924 struct tcp_iter_state *st = seq->private;
1925 struct net *net = seq_file_net(seq);
1926
1927 ++st->num;
1928 ++st->offset;
1929
1930 sk = sk_nulls_next(sk);
1931
1932 sk_nulls_for_each_from(sk, node) {
1933 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1934 return sk;
1935 }
1936
1937 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1938 ++st->bucket;
1939 return established_get_first(seq);
1940 }
1941
1942 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1943 {
1944 struct tcp_iter_state *st = seq->private;
1945 void *rc;
1946
1947 st->bucket = 0;
1948 rc = established_get_first(seq);
1949
1950 while (rc && pos) {
1951 rc = established_get_next(seq, rc);
1952 --pos;
1953 }
1954 return rc;
1955 }
1956
1957 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1958 {
1959 void *rc;
1960 struct tcp_iter_state *st = seq->private;
1961
1962 st->state = TCP_SEQ_STATE_LISTENING;
1963 rc = listening_get_idx(seq, &pos);
1964
1965 if (!rc) {
1966 st->state = TCP_SEQ_STATE_ESTABLISHED;
1967 rc = established_get_idx(seq, pos);
1968 }
1969
1970 return rc;
1971 }
1972
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

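/*
 * seq_file ->start() operation.  Returning SEQ_START_TOKEN makes
 * ->show() emit the header line first.
 */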
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

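/*
 * seq_file ->next() operation: hand out the following socket,
 * switching from the listening to the established table when the
 * former is exhausted.
 */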
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

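/*
 * seq_file ->stop() operation: release whichever bucket lock the
 * iterator is still holding.
 */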
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start = tcp_seq_start;
	afinfo->seq_ops.next  = tcp_seq_next;
	afinfo->seq_ops.stop  = tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

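/*
 * Format one /proc/net/tcp line for a pending connection request
 * (TCP_NEW_SYN_RECV).  Most fields are fixed: only the expire timer
 * can be pending, and a request sock has no inode.
 */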
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0, /* non-standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

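/*
 * Format one /proc/net/tcp line for a full socket.  The "tr" field
 * encodes the pending timer: 1 for the retransmit class (retransmit,
 * early retransmit, tail loss probe), 4 for the zero-window probe
 * timer, 2 when sk_timer (keepalive) is pending, 0 otherwise.
 */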
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active = 2;
		timer_expires = sk->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    fastopenq->max_qlen : /* fastopenq points at an embedded member and cannot be NULL */
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

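/*
 * Emit one row of /proc/net/tcp.  An illustrative, made-up row for a
 * socket listening on 127.0.0.1:22 (addresses and ports print in hex;
 * %08X of a __be32 shows 127.0.0.1 as 0100007F on little endian):
 *
 *  0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000  1000 0 12345 1 ffff88003c1d5000 100 0 0 10 0
 */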
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name     = "tcp",
	.family   = AF_INET,
	.seq_fops = &tcp_afinfo_seq_fops,
	.seq_ops  = {
		.show = tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

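/*
 * IPv4 TCP operations table; hooked up to the socket layer through
 * the inetsw table in af_inet.c.
 */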
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

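/*
 * Per-netns setup/teardown.  The per-cpu control sockets created here
 * are what tcp_v4_send_reset() and tcp_v4_send_ack() use to transmit
 * RSTs and ACKs on behalf of sockets we do not own.
 */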
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}