net/ipv4/tcp_ipv4.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after a year
45 * in coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and
49 * Alexey Kuznetsov : Support the IPV6_V6ONLY socket option, which
50 * allows both IPv4 and IPv6 sockets to bind a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99
100 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 ip_hdr(skb)->saddr,
104 tcp_hdr(skb)->dest,
105 tcp_hdr(skb)->source);
106 }
107
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 struct tcp_sock *tp = tcp_sk(sk);
112
113 /* With PAWS, it is safe from the viewpoint
114 of data integrity. Even without PAWS it is safe provided sequence
115 spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
116
117 Actually, the idea is close to VJ's, only the timestamp cache is
118 held not per host but per port pair, and the TW bucket is used as the
119 state holder.
120
121 If the TW bucket has already been destroyed, we fall back to VJ's
122 scheme and use the initial timestamp retrieved from the peer table.
123 */
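/* Illustrative example of the offset applied below (the tw_snd_nxt value
 * is assumed, not taken from the source): with tw_snd_nxt = 1000,
 *
 *	write_seq = 1000 + 65535 + 2 = 66537
 *
 * i.e. the new incarnation starts past the largest unscaled window
 * (65535) plus, plausibly, one sequence number each for the old FIN and
 * the new SYN, so old segments cannot alias new data even without PAWS.
 */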
124 if (tcptw->tw_ts_recent_stamp &&
125 (!twp || (sysctl_tcp_tw_reuse &&
126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 if (tp->write_seq == 0)
129 tp->write_seq = 1;
130 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
131 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 sock_hold(sktw);
133 return 1;
134 }
135
136 return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
139
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 struct inet_sock *inet = inet_sk(sk);
145 struct tcp_sock *tp = tcp_sk(sk);
146 __be16 orig_sport, orig_dport;
147 __be32 daddr, nexthop;
148 struct flowi4 *fl4;
149 struct rtable *rt;
150 int err;
151 struct ip_options_rcu *inet_opt;
152
153 if (addr_len < sizeof(struct sockaddr_in))
154 return -EINVAL;
155
156 if (usin->sin_family != AF_INET)
157 return -EAFNOSUPPORT;
158
159 nexthop = daddr = usin->sin_addr.s_addr;
160 inet_opt = rcu_dereference_protected(inet->inet_opt,
161 sock_owned_by_user(sk));
162 if (inet_opt && inet_opt->opt.srr) {
163 if (!daddr)
164 return -EINVAL;
165 nexthop = inet_opt->opt.faddr;
166 }
167
168 orig_sport = inet->inet_sport;
169 orig_dport = usin->sin_port;
170 fl4 = &inet->cork.fl.u.ip4;
171 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 IPPROTO_TCP,
174 orig_sport, orig_dport, sk);
175 if (IS_ERR(rt)) {
176 err = PTR_ERR(rt);
177 if (err == -ENETUNREACH)
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 return err;
180 }
181
182 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 ip_rt_put(rt);
184 return -ENETUNREACH;
185 }
186
187 if (!inet_opt || !inet_opt->opt.srr)
188 daddr = fl4->daddr;
189
190 if (!inet->inet_saddr)
191 inet->inet_saddr = fl4->saddr;
192 sk_rcv_saddr_set(sk, inet->inet_saddr);
193
194 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 /* Reset inherited state */
196 tp->rx_opt.ts_recent = 0;
197 tp->rx_opt.ts_recent_stamp = 0;
198 if (likely(!tp->repair))
199 tp->write_seq = 0;
200 }
201
202 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 tcp_fetch_timewait_stamp(sk, &rt->dst);
205
206 inet->inet_dport = usin->sin_port;
207 sk_daddr_set(sk, daddr);
208
209 inet_csk(sk)->icsk_ext_hdr_len = 0;
210 if (inet_opt)
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212
213 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214
215 /* Socket identity is still unknown (sport may be zero).
216 * However, we set the state to SYN-SENT and, without releasing the
217 * socket lock, select a source port, enter ourselves into the hash
218 * tables and complete initialization after this.
219 */
220 tcp_set_state(sk, TCP_SYN_SENT);
221 err = inet_hash_connect(&tcp_death_row, sk);
222 if (err)
223 goto failure;
224
225 sk_set_txhash(sk);
226
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk);
229 if (IS_ERR(rt)) {
230 err = PTR_ERR(rt);
231 rt = NULL;
232 goto failure;
233 }
234 /* OK, now commit destination to socket. */
235 sk->sk_gso_type = SKB_GSO_TCPV4;
236 sk_setup_caps(sk, &rt->dst);
237
238 if (!tp->write_seq && likely(!tp->repair))
239 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 inet->inet_daddr,
241 inet->inet_sport,
242 usin->sin_port);
243
244 inet->inet_id = tp->write_seq ^ jiffies;
245
246 err = tcp_connect(sk);
247
248 rt = NULL;
249 if (err)
250 goto failure;
251
252 return 0;
253
254 failure:
255 /*
256 * This unhashes the socket and releases the local port,
257 * if necessary.
258 */
259 tcp_set_state(sk, TCP_CLOSE);
260 ip_rt_put(rt);
261 sk->sk_route_caps = 0;
262 inet->inet_dport = 0;
263 return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266
267 /*
268 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269 * It can be called through tcp_release_cb() if socket was owned by user
270 * at the time tcp_v4_err() was called to handle ICMP message.
271 */
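/* Note the two ways this function is reached, both visible in this file:
 * directly from tcp_v4_err() when the socket is not owned by user
 * context, or deferred through the TCP_MTU_REDUCED_DEFERRED bit in
 * tp->tsq_flags, which tcp_release_cb() acts on once the socket lock is
 * released.
 */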
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 struct dst_entry *dst;
275 struct inet_sock *inet = inet_sk(sk);
276 u32 mtu = tcp_sk(sk)->mtu_info;
277
278 dst = inet_csk_update_pmtu(sk, mtu);
279 if (!dst)
280 return;
281
282 /* Something is about to go wrong... Remember the soft error
283 * for the case that this connection is not able to recover.
284 */
285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 sk->sk_err_soft = EMSGSIZE;
287
288 mtu = dst_mtu(dst);
289
290 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 ip_sk_accept_pmtu(sk) &&
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 tcp_sync_mss(sk, mtu);
294
295 /* Resend the TCP packet because it's
296 * clear that the old packet has been
297 * dropped. This is the new "fast" path mtu
298 * discovery.
299 */
300 tcp_simple_retransmit(sk);
301 } /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307 struct dst_entry *dst = __sk_dst_check(sk, 0);
308
309 if (dst)
310 dst->ops->redirect(dst, sk, skb);
311 }
312
313
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317 struct request_sock *req = inet_reqsk(sk);
318 struct net *net = sock_net(sk);
319
320 /* ICMPs are not backlogged, hence we cannot get
321 * an established socket here.
322 */
323 WARN_ON(req->sk);
324
325 if (seq != tcp_rsk(req)->snt_isn) {
326 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327 reqsk_put(req);
328 } else {
329 /*
330 * Still in SYN_RECV, just remove it silently.
331 * There is no good way to pass the error to the newly
332 * created socket, and POSIX does not want network
333 * errors returned from accept().
334 */
335 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337 }
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340
341 /*
342 * This routine is called by the ICMP module when it gets some
343 * sort of error condition. If err < 0 then the socket should
344 * be closed and the error returned to the user. If err > 0
345 * it's just the icmp type << 8 | icmp code. After adjustment,
346 * the header points to the first 8 bytes of the tcp header. We need
347 * to find the appropriate port.
348 *
349 * The locking strategy used here is very "optimistic". When
350 * someone else accesses the socket the ICMP is just dropped
351 * and for some paths there is no check at all.
352 * A more general error queue to queue errors for later handling
353 * is probably better.
354 *
355 */
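/* Illustrative example of the encoding described above: an ICMP
 * destination-unreachable / port-unreachable message arrives here as
 *
 *	err = (ICMP_DEST_UNREACH << 8) | ICMP_PORT_UNREACH
 *	    = (3 << 8) | 3 = 0x0303
 *
 * whereas a negative err means "close the socket and report the error".
 */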
356
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 struct inet_connection_sock *icsk;
362 struct tcp_sock *tp;
363 struct inet_sock *inet;
364 const int type = icmp_hdr(icmp_skb)->type;
365 const int code = icmp_hdr(icmp_skb)->code;
366 struct sock *sk;
367 struct sk_buff *skb;
368 struct request_sock *fastopen;
369 __u32 seq, snd_una;
370 __u32 remaining;
371 int err;
372 struct net *net = dev_net(icmp_skb->dev);
373
374 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 th->dest, iph->saddr, ntohs(th->source),
376 inet_iif(icmp_skb));
377 if (!sk) {
378 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 return;
380 }
381 if (sk->sk_state == TCP_TIME_WAIT) {
382 inet_twsk_put(inet_twsk(sk));
383 return;
384 }
385 seq = ntohl(th->seq);
386 if (sk->sk_state == TCP_NEW_SYN_RECV)
387 return tcp_req_err(sk, seq);
388
389 bh_lock_sock(sk);
390 /* If too many ICMPs get dropped on busy
391 * servers this needs to be solved differently.
392 * We do take care of the PMTU discovery (RFC1191) special case:
393 * we can receive locally generated ICMP messages while the socket is held.
394 */
395 if (sock_owned_by_user(sk)) {
396 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398 }
399 if (sk->sk_state == TCP_CLOSE)
400 goto out;
401
402 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404 goto out;
405 }
406
407 icsk = inet_csk(sk);
408 tp = tcp_sk(sk);
409 /* XXX (TFO) - tp->snd_una should be the ISN (see tcp_create_openreq_child()) */
410 fastopen = tp->fastopen_rsk;
411 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 if (sk->sk_state != TCP_LISTEN &&
413 !between(seq, snd_una, tp->snd_nxt)) {
414 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415 goto out;
416 }
417
418 switch (type) {
419 case ICMP_REDIRECT:
420 do_redirect(icmp_skb, sk);
421 goto out;
422 case ICMP_SOURCE_QUENCH:
423 /* Just silently ignore these. */
424 goto out;
425 case ICMP_PARAMETERPROB:
426 err = EPROTO;
427 break;
428 case ICMP_DEST_UNREACH:
429 if (code > NR_ICMP_UNREACH)
430 goto out;
431
432 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433 /* We are not interested in TCP_LISTEN and open_requests
434 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435 * they should go through unfragmented).
436 */
437 if (sk->sk_state == TCP_LISTEN)
438 goto out;
439
440 tp->mtu_info = info;
441 if (!sock_owned_by_user(sk)) {
442 tcp_v4_mtu_reduced(sk);
443 } else {
444 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445 sock_hold(sk);
446 }
447 goto out;
448 }
449
450 err = icmp_err_convert[code].errno;
451 /* check whether icmp_skb allows reverting the backoff
452 * (see draft-zimmermann-tcp-lcd) */
453 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454 break;
455 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
456 !icsk->icsk_backoff || fastopen)
457 break;
458
459 if (sock_owned_by_user(sk))
460 break;
461
462 icsk->icsk_backoff--;
463 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464 TCP_TIMEOUT_INIT;
465 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466
467 skb = tcp_write_queue_head(sk);
468 BUG_ON(!skb);
469
470 remaining = icsk->icsk_rto -
471 min(icsk->icsk_rto,
472 tcp_time_stamp - tcp_skb_timestamp(skb));
473
474 if (remaining) {
475 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 remaining, TCP_RTO_MAX);
477 } else {
478 /* RTO revert clocked out retransmission.
479 * Will retransmit now */
480 tcp_retransmit_timer(sk);
481 }
482
483 break;
484 case ICMP_TIME_EXCEEDED:
485 err = EHOSTUNREACH;
486 break;
487 default:
488 goto out;
489 }
490
491 switch (sk->sk_state) {
492 case TCP_SYN_SENT:
493 case TCP_SYN_RECV:
494 /* Only in fast or simultaneous open. If a fast open socket is
495 * already accepted, it is treated as a connected one below.
496 */
497 if (fastopen && !fastopen->sk)
498 break;
499
500 if (!sock_owned_by_user(sk)) {
501 sk->sk_err = err;
502
503 sk->sk_error_report(sk);
504
505 tcp_done(sk);
506 } else {
507 sk->sk_err_soft = err;
508 }
509 goto out;
510 }
511
512 /* If we've already connected we will keep trying
513 * until we time out, or the user gives up.
514 *
515 * rfc1122 4.2.3.9 allows us to consider as hard errors
516 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
517 * but it is obsoleted by pmtu discovery).
518 *
519 * Note that in the modern internet, where routing is unreliable
520 * and broken firewalls sit in every dark corner sending random
521 * errors ordered by their masters, even these two messages finally
522 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
523 *
524 * Now we are in compliance with RFCs.
525 * --ANK (980905)
526 */
527
528 inet = inet_sk(sk);
529 if (!sock_owned_by_user(sk) && inet->recverr) {
530 sk->sk_err = err;
531 sk->sk_error_report(sk);
532 } else { /* Only an error on timeout */
533 sk->sk_err_soft = err;
534 }
535
536 out:
537 bh_unlock_sock(sk);
538 sock_put(sk);
539 }
540
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 struct tcphdr *th = tcp_hdr(skb);
544
545 if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 skb->csum_start = skb_transport_header(skb) - skb->head;
548 skb->csum_offset = offsetof(struct tcphdr, check);
549 } else {
550 th->check = tcp_v4_check(skb->len, saddr, daddr,
551 csum_partial(th,
552 th->doff << 2,
553 skb->csum));
554 }
555 }
556
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 const struct inet_sock *inet = inet_sk(sk);
561
562 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565
566 /*
567 * This routine will send an RST to the other tcp.
568 *
569 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570 * for the reset?
571 * Answer: if a packet caused the RST, it is not for a socket
572 * existing in our system; if it is matched to a socket,
573 * it is just a duplicate segment or a bug in the other side's TCP.
574 * So we build the reply based only on the parameters that
575 * arrived with the segment.
576 * Exception: precedence violation. We do not implement it in any case.
577 */
578
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581 const struct tcphdr *th = tcp_hdr(skb);
582 struct {
583 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 } rep;
588 struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 struct tcp_md5sig_key *key;
591 const __u8 *hash_location = NULL;
592 unsigned char newhash[16];
593 int genhash;
594 struct sock *sk1 = NULL;
595 #endif
596 struct net *net;
597
598 /* Never send a reset in response to a reset. */
599 if (th->rst)
600 return;
601
602 /* If sk is not NULL, it means we did a successful lookup and the
603 * incoming route had to be correct. prequeue might have dropped our dst.
604 */
605 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606 return;
607
608 /* Swap the send and the receive. */
609 memset(&rep, 0, sizeof(rep));
610 rep.th.dest = th->source;
611 rep.th.source = th->dest;
612 rep.th.doff = sizeof(struct tcphdr) / 4;
613 rep.th.rst = 1;
614
615 if (th->ack) {
616 rep.th.seq = th->ack_seq;
617 } else {
618 rep.th.ack = 1;
619 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 skb->len - (th->doff << 2));
621 }
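/* The seq/ack choice above follows RFC 793's reset generation rules:
 * if the offending segment carried an ACK, the RST reuses its ack_seq
 * as our seq; otherwise the RST ACKs everything the segment consumed,
 * with SYN and FIN each counting as one sequence number. Illustrative
 * example (values assumed): an unsolicited SYN with seq = 100 and no
 * payload draws an RST with ack_seq = 101.
 */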
622
623 memset(&arg, 0, sizeof(arg));
624 arg.iov[0].iov_base = (unsigned char *)&rep;
625 arg.iov[0].iov_len = sizeof(rep.th);
626
627 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 hash_location = tcp_parse_md5sig_option(th);
630 if (!sk && hash_location) {
631 /*
632 * The active side is lost. Try to find the listening socket through
633 * the source port, and then find the md5 key through the listening socket.
634 * We do not loosen security here:
635 * the incoming packet is checked against the md5 hash of the key we find;
636 * no RST is generated if the md5 hash doesn't match.
637 */
638 sk1 = __inet_lookup_listener(net,
639 &tcp_hashinfo, ip_hdr(skb)->saddr,
640 th->source, ip_hdr(skb)->daddr,
641 ntohs(th->source), inet_iif(skb));
642 /* don't send an RST if we can't find a key */
643 if (!sk1)
644 return;
645 rcu_read_lock();
646 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 &ip_hdr(skb)->saddr, AF_INET);
648 if (!key)
649 goto release_sk1;
650
651 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 goto release_sk1;
654 } else {
655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 &ip_hdr(skb)->saddr,
657 AF_INET) : NULL;
658 }
659
660 if (key) {
661 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662 (TCPOPT_NOP << 16) |
663 (TCPOPT_MD5SIG << 8) |
664 TCPOLEN_MD5SIG);
665 /* Update length and the length the header thinks exists */
666 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 rep.th.doff = arg.iov[0].iov_len / 4;
668
669 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 key, ip_hdr(skb)->saddr,
671 ip_hdr(skb)->daddr, &rep.th);
672 }
673 #endif
674 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 ip_hdr(skb)->saddr, /* XXX */
676 arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
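/* arg.csum pre-computes the TCP pseudo-header sum from the incoming
 * header's addresses, and csumoffset is expressed in 16-bit words -
 * hence dividing the byte offset of tcphdr.check by 2 - so the reply
 * path knows where to store the folded checksum.
 */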
678 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 /* When the socket is gone, all binding information is lost.
680 * Routing might fail in this case. No choice here: if we choose to force
681 * the input interface, we will misroute in case of an asymmetric route.
682 */
683 if (sk)
684 arg.bound_dev_if = sk->sk_bound_dev_if;
685
686 arg.tos = ip_hdr(skb)->tos;
687 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 &arg, arg.iov[0].iov_len);
691
692 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697 if (sk1) {
698 rcu_read_unlock();
699 sock_put(sk1);
700 }
701 #endif
702 }
703
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705 outside of socket context, is certainly ugly. What can I do?
706 */
707
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709 u32 win, u32 tsval, u32 tsecr, int oif,
710 struct tcp_md5sig_key *key,
711 int reply_flags, u8 tos)
712 {
713 const struct tcphdr *th = tcp_hdr(skb);
714 struct {
715 struct tcphdr th;
716 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720 ];
721 } rep;
722 struct ip_reply_arg arg;
723 struct net *net = dev_net(skb_dst(skb)->dev);
724
725 memset(&rep.th, 0, sizeof(struct tcphdr));
726 memset(&arg, 0, sizeof(arg));
727
728 arg.iov[0].iov_base = (unsigned char *)&rep;
729 arg.iov[0].iov_len = sizeof(rep.th);
730 if (tsecr) {
731 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732 (TCPOPT_TIMESTAMP << 8) |
733 TCPOLEN_TIMESTAMP);
734 rep.opt[1] = htonl(tsval);
735 rep.opt[2] = htonl(tsecr);
736 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 }
738
739 /* Swap the send and the receive. */
740 rep.th.dest = th->source;
741 rep.th.source = th->dest;
742 rep.th.doff = arg.iov[0].iov_len / 4;
743 rep.th.seq = htonl(seq);
744 rep.th.ack_seq = htonl(ack);
745 rep.th.ack = 1;
746 rep.th.window = htons(win);
747
748 #ifdef CONFIG_TCP_MD5SIG
749 if (key) {
750 int offset = (tsecr) ? 3 : 0;
751
752 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753 (TCPOPT_NOP << 16) |
754 (TCPOPT_MD5SIG << 8) |
755 TCPOLEN_MD5SIG);
756 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 rep.th.doff = arg.iov[0].iov_len/4;
758
759 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 key, ip_hdr(skb)->saddr,
761 ip_hdr(skb)->daddr, &rep.th);
762 }
763 #endif
764 arg.flags = reply_flags;
765 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 ip_hdr(skb)->saddr, /* XXX */
767 arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769 if (oif)
770 arg.bound_dev_if = oif;
771 arg.tos = tos;
772 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 &arg, arg.iov[0].iov_len);
776
777 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782 struct inet_timewait_sock *tw = inet_twsk(sk);
783 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784
785 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787 tcp_time_stamp + tcptw->tw_ts_offset,
788 tcptw->tw_ts_recent,
789 tw->tw_bound_dev_if,
790 tcp_twsk_md5_key(tcptw),
791 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792 tw->tw_tos
793 );
794
795 inet_twsk_put(tw);
796 }
797
798 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
799 struct request_sock *req)
800 {
801 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803 */
804 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807 tcp_time_stamp,
808 req->ts_recent,
809 0,
810 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811 AF_INET),
812 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813 ip_hdr(skb)->tos);
814 }
815
816 /*
817 * Send a SYN-ACK after having received a SYN.
818 * This still operates on a request_sock only, not on a big
819 * socket.
820 */
821 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
822 struct flowi *fl,
823 struct request_sock *req,
824 u16 queue_mapping,
825 struct tcp_fastopen_cookie *foc)
826 {
827 const struct inet_request_sock *ireq = inet_rsk(req);
828 struct flowi4 fl4;
829 int err = -1;
830 struct sk_buff *skb;
831
832 /* First, grab a route. */
833 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 return -1;
835
836 skb = tcp_make_synack(sk, dst, req, foc);
837
838 if (skb) {
839 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840
841 skb_set_queue_mapping(skb, queue_mapping);
842 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843 ireq->ir_rmt_addr,
844 ireq->opt);
845 err = net_xmit_eval(err);
846 }
847
848 return err;
849 }
850
851 /*
852 * IPv4 request_sock destructor.
853 */
854 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 {
856 kfree(inet_rsk(req)->opt);
857 }
858
859
860 #ifdef CONFIG_TCP_MD5SIG
861 /*
862 * RFC2385 MD5 checksumming requires a mapping of
863 * IP address->MD5 Key.
864 * We need to maintain these in the sk structure.
865 */
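/* On the wire the RFC 2385 option is kind 19 (TCPOPT_MD5SIG) and
 * length 18 (TCPOLEN_MD5SIG): a two-byte option header plus a 16-byte
 * MD5 digest. The senders in this file pad it with two NOPs to the
 * 20-byte TCPOLEN_MD5SIG_ALIGNED so the TCP header stays 32-bit aligned.
 */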
866
867 /* Find the Key structure for an address. */
868 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
869 const union tcp_md5_addr *addr,
870 int family)
871 {
872 const struct tcp_sock *tp = tcp_sk(sk);
873 struct tcp_md5sig_key *key;
874 unsigned int size = sizeof(struct in_addr);
875 const struct tcp_md5sig_info *md5sig;
876
877 /* caller either holds rcu_read_lock() or socket lock */
878 md5sig = rcu_dereference_check(tp->md5sig_info,
879 sock_owned_by_user(sk) ||
880 lockdep_is_held(&sk->sk_lock.slock));
881 if (!md5sig)
882 return NULL;
883 #if IS_ENABLED(CONFIG_IPV6)
884 if (family == AF_INET6)
885 size = sizeof(struct in6_addr);
886 #endif
887 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
888 if (key->family != family)
889 continue;
890 if (!memcmp(&key->addr, addr, size))
891 return key;
892 }
893 return NULL;
894 }
895 EXPORT_SYMBOL(tcp_md5_do_lookup);
896
897 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
898 const struct sock *addr_sk)
899 {
900 const union tcp_md5_addr *addr;
901
902 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
903 return tcp_md5_do_lookup(sk, addr, AF_INET);
904 }
905 EXPORT_SYMBOL(tcp_v4_md5_lookup);
906
907 /* This can be called on a newly created socket, from other files */
908 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
909 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
910 {
911 /* Add Key to the list */
912 struct tcp_md5sig_key *key;
913 struct tcp_sock *tp = tcp_sk(sk);
914 struct tcp_md5sig_info *md5sig;
915
916 key = tcp_md5_do_lookup(sk, addr, family);
917 if (key) {
918 /* Pre-existing entry - just update that one. */
919 memcpy(key->key, newkey, newkeylen);
920 key->keylen = newkeylen;
921 return 0;
922 }
923
924 md5sig = rcu_dereference_protected(tp->md5sig_info,
925 sock_owned_by_user(sk));
926 if (!md5sig) {
927 md5sig = kmalloc(sizeof(*md5sig), gfp);
928 if (!md5sig)
929 return -ENOMEM;
930
931 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
932 INIT_HLIST_HEAD(&md5sig->head);
933 rcu_assign_pointer(tp->md5sig_info, md5sig);
934 }
935
936 key = sock_kmalloc(sk, sizeof(*key), gfp);
937 if (!key)
938 return -ENOMEM;
939 if (!tcp_alloc_md5sig_pool()) {
940 sock_kfree_s(sk, key, sizeof(*key));
941 return -ENOMEM;
942 }
943
944 memcpy(key->key, newkey, newkeylen);
945 key->keylen = newkeylen;
946 key->family = family;
947 memcpy(&key->addr, addr,
948 (family == AF_INET6) ? sizeof(struct in6_addr) :
949 sizeof(struct in_addr));
950 hlist_add_head_rcu(&key->node, &md5sig->head);
951 return 0;
952 }
953 EXPORT_SYMBOL(tcp_md5_do_add);
954
955 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
956 {
957 struct tcp_md5sig_key *key;
958
959 key = tcp_md5_do_lookup(sk, addr, family);
960 if (!key)
961 return -ENOENT;
962 hlist_del_rcu(&key->node);
963 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
964 kfree_rcu(key, rcu);
965 return 0;
966 }
967 EXPORT_SYMBOL(tcp_md5_do_del);
968
969 static void tcp_clear_md5_list(struct sock *sk)
970 {
971 struct tcp_sock *tp = tcp_sk(sk);
972 struct tcp_md5sig_key *key;
973 struct hlist_node *n;
974 struct tcp_md5sig_info *md5sig;
975
976 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
977
978 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
979 hlist_del_rcu(&key->node);
980 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
981 kfree_rcu(key, rcu);
982 }
983 }
984
985 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
986 int optlen)
987 {
988 struct tcp_md5sig cmd;
989 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
990
991 if (optlen < sizeof(cmd))
992 return -EINVAL;
993
994 if (copy_from_user(&cmd, optval, sizeof(cmd)))
995 return -EFAULT;
996
997 if (sin->sin_family != AF_INET)
998 return -EINVAL;
999
1000 if (!cmd.tcpm_keylen)
1001 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1002 AF_INET);
1003
1004 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1005 return -EINVAL;
1006
1007 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1008 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1009 GFP_KERNEL);
1010 }
1011
1012 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1013 __be32 daddr, __be32 saddr, int nbytes)
1014 {
1015 struct tcp4_pseudohdr *bp;
1016 struct scatterlist sg;
1017
1018 bp = &hp->md5_blk.ip4;
1019
1020 /*
1021 * 1. the TCP pseudo-header (in the order: source IP address,
1022 * destination IP address, zero-padded protocol number, and
1023 * segment length)
1024 */
1025 bp->saddr = saddr;
1026 bp->daddr = daddr;
1027 bp->pad = 0;
1028 bp->protocol = IPPROTO_TCP;
1029 bp->len = cpu_to_be16(nbytes);
1030
1031 sg_init_one(&sg, bp, sizeof(*bp));
1032 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1033 }
1034
1035 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1036 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1037 {
1038 struct tcp_md5sig_pool *hp;
1039 struct hash_desc *desc;
1040
1041 hp = tcp_get_md5sig_pool();
1042 if (!hp)
1043 goto clear_hash_noput;
1044 desc = &hp->md5_desc;
1045
1046 if (crypto_hash_init(desc))
1047 goto clear_hash;
1048 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1049 goto clear_hash;
1050 if (tcp_md5_hash_header(hp, th))
1051 goto clear_hash;
1052 if (tcp_md5_hash_key(hp, key))
1053 goto clear_hash;
1054 if (crypto_hash_final(desc, md5_hash))
1055 goto clear_hash;
1056
1057 tcp_put_md5sig_pool();
1058 return 0;
1059
1060 clear_hash:
1061 tcp_put_md5sig_pool();
1062 clear_hash_noput:
1063 memset(md5_hash, 0, 16);
1064 return 1;
1065 }
1066
1067 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1068 const struct sock *sk,
1069 const struct sk_buff *skb)
1070 {
1071 struct tcp_md5sig_pool *hp;
1072 struct hash_desc *desc;
1073 const struct tcphdr *th = tcp_hdr(skb);
1074 __be32 saddr, daddr;
1075
1076 if (sk) { /* valid for establish/request sockets */
1077 saddr = sk->sk_rcv_saddr;
1078 daddr = sk->sk_daddr;
1079 } else {
1080 const struct iphdr *iph = ip_hdr(skb);
1081 saddr = iph->saddr;
1082 daddr = iph->daddr;
1083 }
1084
1085 hp = tcp_get_md5sig_pool();
1086 if (!hp)
1087 goto clear_hash_noput;
1088 desc = &hp->md5_desc;
1089
1090 if (crypto_hash_init(desc))
1091 goto clear_hash;
1092
1093 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1094 goto clear_hash;
1095 if (tcp_md5_hash_header(hp, th))
1096 goto clear_hash;
1097 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1098 goto clear_hash;
1099 if (tcp_md5_hash_key(hp, key))
1100 goto clear_hash;
1101 if (crypto_hash_final(desc, md5_hash))
1102 goto clear_hash;
1103
1104 tcp_put_md5sig_pool();
1105 return 0;
1106
1107 clear_hash:
1108 tcp_put_md5sig_pool();
1109 clear_hash_noput:
1110 memset(md5_hash, 0, 16);
1111 return 1;
1112 }
1113 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1114
1115 /* Called with rcu_read_lock() */
1116 static bool tcp_v4_inbound_md5_hash(struct sock *sk,
1117 const struct sk_buff *skb)
1118 {
1119 /*
1120 * This gets called for each TCP segment that arrives
1121 * so we want to be efficient.
1122 * We have 3 drop cases:
1123 * o No MD5 hash and one expected.
1124 * o MD5 hash and we're not expecting one.
1125 * o MD5 hash and it's wrong.
1126 */
1127 const __u8 *hash_location = NULL;
1128 struct tcp_md5sig_key *hash_expected;
1129 const struct iphdr *iph = ip_hdr(skb);
1130 const struct tcphdr *th = tcp_hdr(skb);
1131 int genhash;
1132 unsigned char newhash[16];
1133
1134 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1135 AF_INET);
1136 hash_location = tcp_parse_md5sig_option(th);
1137
1138 /* We've parsed the options - do we have a hash? */
1139 if (!hash_expected && !hash_location)
1140 return false;
1141
1142 if (hash_expected && !hash_location) {
1143 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1144 return true;
1145 }
1146
1147 if (!hash_expected && hash_location) {
1148 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1149 return true;
1150 }
1151
1152 /* Okay, so this is hash_expected and hash_location -
1153 * so we need to calculate the checksum.
1154 */
1155 genhash = tcp_v4_md5_hash_skb(newhash,
1156 hash_expected,
1157 NULL, skb);
1158
1159 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1160 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1161 &iph->saddr, ntohs(th->source),
1162 &iph->daddr, ntohs(th->dest),
1163 genhash ? " tcp_v4_calc_md5_hash failed"
1164 : "");
1165 return true;
1166 }
1167 return false;
1168 }
1169 #endif
1170
1171 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
1172 struct sk_buff *skb)
1173 {
1174 struct inet_request_sock *ireq = inet_rsk(req);
1175
1176 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1177 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1178 ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1179 ireq->opt = tcp_v4_save_options(skb);
1180 }
1181
1182 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1183 const struct request_sock *req,
1184 bool *strict)
1185 {
1186 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1187
1188 if (strict) {
1189 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1190 *strict = true;
1191 else
1192 *strict = false;
1193 }
1194
1195 return dst;
1196 }
1197
1198 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1199 .family = PF_INET,
1200 .obj_size = sizeof(struct tcp_request_sock),
1201 .rtx_syn_ack = tcp_rtx_synack,
1202 .send_ack = tcp_v4_reqsk_send_ack,
1203 .destructor = tcp_v4_reqsk_destructor,
1204 .send_reset = tcp_v4_send_reset,
1205 .syn_ack_timeout = tcp_syn_ack_timeout,
1206 };
1207
1208 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1209 .mss_clamp = TCP_MSS_DEFAULT,
1210 #ifdef CONFIG_TCP_MD5SIG
1211 .req_md5_lookup = tcp_v4_md5_lookup,
1212 .calc_md5_hash = tcp_v4_md5_hash_skb,
1213 #endif
1214 .init_req = tcp_v4_init_req,
1215 #ifdef CONFIG_SYN_COOKIES
1216 .cookie_init_seq = cookie_v4_init_sequence,
1217 #endif
1218 .route_req = tcp_v4_route_req,
1219 .init_seq = tcp_v4_init_sequence,
1220 .send_synack = tcp_v4_send_synack,
1221 .queue_hash_add = inet_csk_reqsk_queue_hash_add,
1222 };
1223
1224 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1225 {
1226 /* Never answer SYNs sent to broadcast or multicast */
1227 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1228 goto drop;
1229
1230 return tcp_conn_request(&tcp_request_sock_ops,
1231 &tcp_request_sock_ipv4_ops, sk, skb);
1232
1233 drop:
1234 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1235 return 0;
1236 }
1237 EXPORT_SYMBOL(tcp_v4_conn_request);
1238
1239
1240 /*
1241 * The three way handshake has completed - we got a valid synack -
1242 * now create the new socket.
1243 */
1244 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1245 struct request_sock *req,
1246 struct dst_entry *dst)
1247 {
1248 struct inet_request_sock *ireq;
1249 struct inet_sock *newinet;
1250 struct tcp_sock *newtp;
1251 struct sock *newsk;
1252 #ifdef CONFIG_TCP_MD5SIG
1253 struct tcp_md5sig_key *key;
1254 #endif
1255 struct ip_options_rcu *inet_opt;
1256
1257 if (sk_acceptq_is_full(sk))
1258 goto exit_overflow;
1259
1260 newsk = tcp_create_openreq_child(sk, req, skb);
1261 if (!newsk)
1262 goto exit_nonewsk;
1263
1264 newsk->sk_gso_type = SKB_GSO_TCPV4;
1265 inet_sk_rx_dst_set(newsk, skb);
1266
1267 newtp = tcp_sk(newsk);
1268 newinet = inet_sk(newsk);
1269 ireq = inet_rsk(req);
1270 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1271 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1272 newinet->inet_saddr = ireq->ir_loc_addr;
1273 inet_opt = ireq->opt;
1274 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1275 ireq->opt = NULL;
1276 newinet->mc_index = inet_iif(skb);
1277 newinet->mc_ttl = ip_hdr(skb)->ttl;
1278 newinet->rcv_tos = ip_hdr(skb)->tos;
1279 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1280 if (inet_opt)
1281 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1282 newinet->inet_id = newtp->write_seq ^ jiffies;
1283
1284 if (!dst) {
1285 dst = inet_csk_route_child_sock(sk, newsk, req);
1286 if (!dst)
1287 goto put_and_exit;
1288 } else {
1289 /* syncookie case: see end of cookie_v4_check() */
1290 }
1291 sk_setup_caps(newsk, dst);
1292
1293 tcp_ca_openreq_child(newsk, dst);
1294
1295 tcp_sync_mss(newsk, dst_mtu(dst));
1296 newtp->advmss = dst_metric_advmss(dst);
1297 if (tcp_sk(sk)->rx_opt.user_mss &&
1298 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1299 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1300
1301 tcp_initialize_rcv_mss(newsk);
1302
1303 #ifdef CONFIG_TCP_MD5SIG
1304 /* Copy over the MD5 key from the original socket */
1305 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1306 AF_INET);
1307 if (key) {
1308 /*
1309 * We're using one, so create a matching key
1310 * on the newsk structure. If we fail to get
1311 * memory, then we end up not copying the key
1312 * across. Shucks.
1313 */
1314 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1315 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1316 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1317 }
1318 #endif
1319
1320 if (__inet_inherit_port(sk, newsk) < 0)
1321 goto put_and_exit;
1322 __inet_hash_nolisten(newsk, NULL);
1323
1324 return newsk;
1325
1326 exit_overflow:
1327 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1328 exit_nonewsk:
1329 dst_release(dst);
1330 exit:
1331 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1332 return NULL;
1333 put_and_exit:
1334 inet_csk_prepare_forced_close(newsk);
1335 tcp_done(newsk);
1336 goto exit;
1337 }
1338 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1339
1340 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1341 {
1342 const struct tcphdr *th = tcp_hdr(skb);
1343 const struct iphdr *iph = ip_hdr(skb);
1344 struct request_sock *req;
1345 struct sock *nsk;
1346
1347 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1348 if (req) {
1349 nsk = tcp_check_req(sk, skb, req, false);
1350 if (!nsk || nsk == sk)
1351 reqsk_put(req);
1352 return nsk;
1353 }
1354
1355 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1356 th->source, iph->daddr, th->dest, inet_iif(skb));
1357
1358 if (nsk) {
1359 if (nsk->sk_state != TCP_TIME_WAIT) {
1360 bh_lock_sock(nsk);
1361 return nsk;
1362 }
1363 inet_twsk_put(inet_twsk(nsk));
1364 return NULL;
1365 }
1366
1367 #ifdef CONFIG_SYN_COOKIES
1368 if (!th->syn)
1369 sk = cookie_v4_check(sk, skb);
1370 #endif
1371 return sk;
1372 }
1373
1374 /* The socket must have its spinlock held when we get
1375 * here.
1376 *
1377 * We have a potential double-lock case here, so even when
1378 * doing backlog processing we use the BH locking scheme.
1379 * This is because we cannot sleep with the original spinlock
1380 * held.
1381 */
1382 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1383 {
1384 struct sock *rsk;
1385
1386 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1387 struct dst_entry *dst = sk->sk_rx_dst;
1388
1389 sock_rps_save_rxhash(sk, skb);
1390 sk_mark_napi_id(sk, skb);
1391 if (dst) {
1392 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1393 !dst->ops->check(dst, 0)) {
1394 dst_release(dst);
1395 sk->sk_rx_dst = NULL;
1396 }
1397 }
1398 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1399 return 0;
1400 }
1401
1402 if (tcp_checksum_complete(skb))
1403 goto csum_err;
1404
1405 if (sk->sk_state == TCP_LISTEN) {
1406 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1407 if (!nsk)
1408 goto discard;
1409
1410 if (nsk != sk) {
1411 sock_rps_save_rxhash(nsk, skb);
1412 sk_mark_napi_id(sk, skb);
1413 if (tcp_child_process(sk, nsk, skb)) {
1414 rsk = nsk;
1415 goto reset;
1416 }
1417 return 0;
1418 }
1419 } else
1420 sock_rps_save_rxhash(sk, skb);
1421
1422 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1423 rsk = sk;
1424 goto reset;
1425 }
1426 return 0;
1427
1428 reset:
1429 tcp_v4_send_reset(rsk, skb);
1430 discard:
1431 kfree_skb(skb);
1432 /* Be careful here. If this function gets more complicated and
1433 * gcc suffers from register pressure on the x86, sk (in %ebx)
1434 * might be destroyed here. This current version compiles correctly,
1435 * but you have been warned.
1436 */
1437 return 0;
1438
1439 csum_err:
1440 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1441 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1442 goto discard;
1443 }
1444 EXPORT_SYMBOL(tcp_v4_do_rcv);
1445
1446 void tcp_v4_early_demux(struct sk_buff *skb)
1447 {
1448 const struct iphdr *iph;
1449 const struct tcphdr *th;
1450 struct sock *sk;
1451
1452 if (skb->pkt_type != PACKET_HOST)
1453 return;
1454
1455 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1456 return;
1457
1458 iph = ip_hdr(skb);
1459 th = tcp_hdr(skb);
1460
1461 if (th->doff < sizeof(struct tcphdr) / 4)
1462 return;
1463
1464 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1465 iph->saddr, th->source,
1466 iph->daddr, ntohs(th->dest),
1467 skb->skb_iif);
1468 if (sk) {
1469 skb->sk = sk;
1470 skb->destructor = sock_edemux;
1471 if (sk_fullsock(sk)) {
1472 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1473
1474 if (dst)
1475 dst = dst_check(dst, 0);
1476 if (dst &&
1477 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1478 skb_dst_set_noref(skb, dst);
1479 }
1480 }
1481 }
1482
1483 /* Packet is added to VJ-style prequeue for processing in process
1484 * context, if a reader task is waiting. Apparently, this exciting
1485 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1486 * failed somewhere. Latency? Burstiness? Well, at least now we will
1487 * see why it failed. 8)8) --ANK
1488 *
1489 */
1490 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1491 {
1492 struct tcp_sock *tp = tcp_sk(sk);
1493
1494 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1495 return false;
1496
1497 if (skb->len <= tcp_hdrlen(skb) &&
1498 skb_queue_len(&tp->ucopy.prequeue) == 0)
1499 return false;
1500
1501 /* Before escaping the RCU protected region, we need to take care of the
1502 * skb dst. Prequeue is only enabled for established sockets.
1503 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1504 * Instead of doing a full sk_rx_dst validity check here, let's perform
1505 * an optimistic check.
1506 */
1507 if (likely(sk->sk_rx_dst))
1508 skb_dst_drop(skb);
1509 else
1510 skb_dst_force(skb);
1511
1512 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1513 tp->ucopy.memory += skb->truesize;
1514 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1515 struct sk_buff *skb1;
1516
1517 BUG_ON(sock_owned_by_user(sk));
1518
1519 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1520 sk_backlog_rcv(sk, skb1);
1521 NET_INC_STATS_BH(sock_net(sk),
1522 LINUX_MIB_TCPPREQUEUEDROPPED);
1523 }
1524
1525 tp->ucopy.memory = 0;
1526 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1527 wake_up_interruptible_sync_poll(sk_sleep(sk),
1528 POLLIN | POLLRDNORM | POLLRDBAND);
1529 if (!inet_csk_ack_scheduled(sk))
1530 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1531 (3 * tcp_rto_min(sk)) / 4,
1532 TCP_RTO_MAX);
1533 }
1534 return true;
1535 }
1536 EXPORT_SYMBOL(tcp_prequeue);
1537
1538 /*
1539 * From tcp_input.c
1540 */
1541
1542 int tcp_v4_rcv(struct sk_buff *skb)
1543 {
1544 const struct iphdr *iph;
1545 const struct tcphdr *th;
1546 struct sock *sk;
1547 int ret;
1548 struct net *net = dev_net(skb->dev);
1549
1550 if (skb->pkt_type != PACKET_HOST)
1551 goto discard_it;
1552
1553 /* Count it even if it's bad */
1554 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1555
1556 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1557 goto discard_it;
1558
1559 th = tcp_hdr(skb);
1560
1561 if (th->doff < sizeof(struct tcphdr) / 4)
1562 goto bad_packet;
1563 if (!pskb_may_pull(skb, th->doff * 4))
1564 goto discard_it;
1565
1566 /* An explanation is required here, I think.
1567 * Packet length and doff are validated by header prediction,
1568 * provided the case of th->doff==0 is eliminated.
1569 * So, we defer the checks. */
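/* The deferral shows up later in this file: both tcp_v4_do_rcv() and
 * the no_tcp_socket path call tcp_checksum_complete() before trusting
 * the segment, while skb_checksum_init() below can settle for seeding
 * skb->csum with the pseudo-header when hardware has not already
 * verified the sum.
 */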
1570
1571 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1572 goto csum_error;
1573
1574 th = tcp_hdr(skb);
1575 iph = ip_hdr(skb);
1576 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1577 * barrier() makes sure the compiler won't play fool^Waliasing games.
1578 */
1579 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1580 sizeof(struct inet_skb_parm));
1581 barrier();
1582
1583 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1584 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1585 skb->len - th->doff * 4);
1586 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1587 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1588 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1589 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1590 TCP_SKB_CB(skb)->sacked = 0;
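/* Illustrative end_seq arithmetic from the fields above (values
 * assumed): a pure data segment with seq = 1000 and 500 payload bytes
 * gets end_seq = 1500, while a bare SYN gets end_seq = seq + 1, since
 * SYN and FIN each occupy one sequence number.
 */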
1591
1592 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1593 if (!sk)
1594 goto no_tcp_socket;
1595
1596 process:
1597 if (sk->sk_state == TCP_TIME_WAIT)
1598 goto do_time_wait;
1599
1600 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1601 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1602 goto discard_and_relse;
1603 }
1604
1605 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1606 goto discard_and_relse;
1607
1608 #ifdef CONFIG_TCP_MD5SIG
1609 /*
1610 * We really want to reject the packet as early as possible
1611 * if:
1612 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1613 * o There is an MD5 option and we're not expecting one
1614 */
1615 if (tcp_v4_inbound_md5_hash(sk, skb))
1616 goto discard_and_relse;
1617 #endif
1618
1619 nf_reset(skb);
1620
1621 if (sk_filter(sk, skb))
1622 goto discard_and_relse;
1623
1624 sk_incoming_cpu_update(sk);
1625 skb->dev = NULL;
1626
1627 bh_lock_sock_nested(sk);
1628 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1629 ret = 0;
1630 if (!sock_owned_by_user(sk)) {
1631 if (!tcp_prequeue(sk, skb))
1632 ret = tcp_v4_do_rcv(sk, skb);
1633 } else if (unlikely(sk_add_backlog(sk, skb,
1634 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1635 bh_unlock_sock(sk);
1636 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1637 goto discard_and_relse;
1638 }
1639 bh_unlock_sock(sk);
1640
1641 sock_put(sk);
1642
1643 return ret;
1644
1645 no_tcp_socket:
1646 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1647 goto discard_it;
1648
1649 if (tcp_checksum_complete(skb)) {
1650 csum_error:
1651 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1652 bad_packet:
1653 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1654 } else {
1655 tcp_v4_send_reset(NULL, skb);
1656 }
1657
1658 discard_it:
1659 /* Discard frame. */
1660 kfree_skb(skb);
1661 return 0;
1662
1663 discard_and_relse:
1664 sock_put(sk);
1665 goto discard_it;
1666
1667 do_time_wait:
1668 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1669 inet_twsk_put(inet_twsk(sk));
1670 goto discard_it;
1671 }
1672
1673 if (tcp_checksum_complete(skb)) {
1674 inet_twsk_put(inet_twsk(sk));
1675 goto csum_error;
1676 }
1677 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1678 case TCP_TW_SYN: {
1679 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1680 &tcp_hashinfo,
1681 iph->saddr, th->source,
1682 iph->daddr, th->dest,
1683 inet_iif(skb));
1684 if (sk2) {
1685 inet_twsk_deschedule_put(inet_twsk(sk));
1686 sk = sk2;
1687 goto process;
1688 }
1689 /* Fall through to ACK */
1690 }
1691 case TCP_TW_ACK:
1692 tcp_v4_timewait_ack(sk, skb);
1693 break;
1694 case TCP_TW_RST:
1695 goto no_tcp_socket;
1696 case TCP_TW_SUCCESS:;
1697 }
1698 goto discard_it;
1699 }
1700
1701 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1702 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1703 .twsk_unique = tcp_twsk_unique,
1704 .twsk_destructor= tcp_twsk_destructor,
1705 };
1706
1707 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1708 {
1709 struct dst_entry *dst = skb_dst(skb);
1710
1711 if (dst) {
1712 dst_hold(dst);
1713 sk->sk_rx_dst = dst;
1714 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1715 }
1716 }
1717 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1718
1719 const struct inet_connection_sock_af_ops ipv4_specific = {
1720 .queue_xmit = ip_queue_xmit,
1721 .send_check = tcp_v4_send_check,
1722 .rebuild_header = inet_sk_rebuild_header,
1723 .sk_rx_dst_set = inet_sk_rx_dst_set,
1724 .conn_request = tcp_v4_conn_request,
1725 .syn_recv_sock = tcp_v4_syn_recv_sock,
1726 .net_header_len = sizeof(struct iphdr),
1727 .setsockopt = ip_setsockopt,
1728 .getsockopt = ip_getsockopt,
1729 .addr2sockaddr = inet_csk_addr2sockaddr,
1730 .sockaddr_len = sizeof(struct sockaddr_in),
1731 .bind_conflict = inet_csk_bind_conflict,
1732 #ifdef CONFIG_COMPAT
1733 .compat_setsockopt = compat_ip_setsockopt,
1734 .compat_getsockopt = compat_ip_getsockopt,
1735 #endif
1736 .mtu_reduced = tcp_v4_mtu_reduced,
1737 };
1738 EXPORT_SYMBOL(ipv4_specific);
1739
1740 #ifdef CONFIG_TCP_MD5SIG
1741 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1742 .md5_lookup = tcp_v4_md5_lookup,
1743 .calc_md5_hash = tcp_v4_md5_hash_skb,
1744 .md5_parse = tcp_v4_parse_md5_keys,
1745 };
1746 #endif
1747
1748 /* NOTE: A lot of things are set to zero explicitly by the call to
1749 * sk_alloc(), so they need not be done here.
1750 */
1751 static int tcp_v4_init_sock(struct sock *sk)
1752 {
1753 struct inet_connection_sock *icsk = inet_csk(sk);
1754
1755 tcp_init_sock(sk);
1756
1757 icsk->icsk_af_ops = &ipv4_specific;
1758
1759 #ifdef CONFIG_TCP_MD5SIG
1760 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1761 #endif
1762
1763 return 0;
1764 }
1765
1766 void tcp_v4_destroy_sock(struct sock *sk)
1767 {
1768 struct tcp_sock *tp = tcp_sk(sk);
1769
1770 tcp_clear_xmit_timers(sk);
1771
1772 tcp_cleanup_congestion_control(sk);
1773
1774 /* Clean up the write buffer. */
1775 tcp_write_queue_purge(sk);
1776
1777 /* Cleans up our, hopefully empty, out_of_order_queue. */
1778 __skb_queue_purge(&tp->out_of_order_queue);
1779
1780 #ifdef CONFIG_TCP_MD5SIG
1781 /* Clean up the MD5 key list, if any */
1782 if (tp->md5sig_info) {
1783 tcp_clear_md5_list(sk);
1784 kfree_rcu(tp->md5sig_info, rcu);
1785 tp->md5sig_info = NULL;
1786 }
1787 #endif
1788
1789 /* Clean the prequeue; it really must be empty */
1790 __skb_queue_purge(&tp->ucopy.prequeue);
1791
1792 /* Clean up a referenced TCP bind bucket. */
1793 if (inet_csk(sk)->icsk_bind_hash)
1794 inet_put_port(sk);
1795
1796 BUG_ON(tp->fastopen_rsk);
1797
1798 /* If the socket is aborted during a connect operation */
1799 tcp_free_fastopen_req(tp);
1800 tcp_saved_syn_free(tp);
1801
1802 sk_sockets_allocated_dec(sk);
1803 sock_release_memcg(sk);
1804 }
1805 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1806
1807 #ifdef CONFIG_PROC_FS
1808 /* Proc filesystem TCP sock list dumping. */
1809
1810 /*
1811 * Get the next listener socket following cur. If cur is NULL, get the first
1812 * socket starting from the bucket given in st->bucket; when st->bucket is zero the
1813 * very first socket in the hash table is returned.
1814 */
1815 static void *listening_get_next(struct seq_file *seq, void *cur)
1816 {
1817 struct inet_connection_sock *icsk;
1818 struct hlist_nulls_node *node;
1819 struct sock *sk = cur;
1820 struct inet_listen_hashbucket *ilb;
1821 struct tcp_iter_state *st = seq->private;
1822 struct net *net = seq_file_net(seq);
1823
1824 if (!sk) {
1825 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1826 spin_lock_bh(&ilb->lock);
1827 sk = sk_nulls_head(&ilb->head);
1828 st->offset = 0;
1829 goto get_sk;
1830 }
1831 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1832 ++st->num;
1833 ++st->offset;
1834
1835 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1836 struct request_sock *req = cur;
1837
1838 icsk = inet_csk(st->syn_wait_sk);
1839 req = req->dl_next;
1840 while (1) {
1841 while (req) {
1842 if (req->rsk_ops->family == st->family) {
1843 cur = req;
1844 goto out;
1845 }
1846 req = req->dl_next;
1847 }
1848 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1849 break;
1850 get_req:
1851 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1852 }
1853 sk = sk_nulls_next(st->syn_wait_sk);
1854 st->state = TCP_SEQ_STATE_LISTENING;
1855 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1856 } else {
1857 icsk = inet_csk(sk);
1858 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1859 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1860 goto start_req;
1861 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1862 sk = sk_nulls_next(sk);
1863 }
1864 get_sk:
1865 sk_nulls_for_each_from(sk, node) {
1866 if (!net_eq(sock_net(sk), net))
1867 continue;
1868 if (sk->sk_family == st->family) {
1869 cur = sk;
1870 goto out;
1871 }
1872 icsk = inet_csk(sk);
1873 spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1874 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1875 start_req:
1876 st->uid = sock_i_uid(sk);
1877 st->syn_wait_sk = sk;
1878 st->state = TCP_SEQ_STATE_OPENREQ;
1879 st->sbucket = 0;
1880 goto get_req;
1881 }
1882 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1883 }
1884 spin_unlock_bh(&ilb->lock);
1885 st->offset = 0;
1886 if (++st->bucket < INET_LHTABLE_SIZE) {
1887 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1888 spin_lock_bh(&ilb->lock);
1889 sk = sk_nulls_head(&ilb->head);
1890 goto get_sk;
1891 }
1892 cur = NULL;
1893 out:
1894 return cur;
1895 }
1896
1897 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1898 {
1899 struct tcp_iter_state *st = seq->private;
1900 void *rc;
1901
1902 st->bucket = 0;
1903 st->offset = 0;
1904 rc = listening_get_next(seq, NULL);
1905
1906 while (rc && *pos) {
1907 rc = listening_get_next(seq, rc);
1908 --*pos;
1909 }
1910 return rc;
1911 }
1912
1913 static inline bool empty_bucket(const struct tcp_iter_state *st)
1914 {
1915 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1916 }
1917
1918 /*
1919 * Get first established socket starting from bucket given in st->bucket.
1920 * If st->bucket is zero, the very first socket in the hash is returned.
1921 */
1922 static void *established_get_first(struct seq_file *seq)
1923 {
1924 struct tcp_iter_state *st = seq->private;
1925 struct net *net = seq_file_net(seq);
1926 void *rc = NULL;
1927
1928 st->offset = 0;
1929 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1930 struct sock *sk;
1931 struct hlist_nulls_node *node;
1932 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1933
1934 /* Lockless fast path for the common case of empty buckets */
1935 if (empty_bucket(st))
1936 continue;
1937
1938 spin_lock_bh(lock);
1939 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1940 if (sk->sk_family != st->family ||
1941 !net_eq(sock_net(sk), net)) {
1942 continue;
1943 }
1944 rc = sk;
1945 goto out;
1946 }
1947 spin_unlock_bh(lock);
1948 }
1949 out:
1950 return rc;
1951 }
1952
1953 static void *established_get_next(struct seq_file *seq, void *cur)
1954 {
1955 struct sock *sk = cur;
1956 struct hlist_nulls_node *node;
1957 struct tcp_iter_state *st = seq->private;
1958 struct net *net = seq_file_net(seq);
1959
1960 ++st->num;
1961 ++st->offset;
1962
1963 sk = sk_nulls_next(sk);
1964
1965 sk_nulls_for_each_from(sk, node) {
1966 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1967 return sk;
1968 }
1969
1970 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1971 ++st->bucket;
1972 return established_get_first(seq);
1973 }
1974
1975 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1976 {
1977 struct tcp_iter_state *st = seq->private;
1978 void *rc;
1979
1980 st->bucket = 0;
1981 rc = established_get_first(seq);
1982
1983 while (rc && pos) {
1984 rc = established_get_next(seq, rc);
1985 --pos;
1986 }
1987 return rc;
1988 }
1989
1990 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1991 {
1992 void *rc;
1993 struct tcp_iter_state *st = seq->private;
1994
1995 st->state = TCP_SEQ_STATE_LISTENING;
1996 rc = listening_get_idx(seq, &pos);
1997
1998 if (!rc) {
1999 st->state = TCP_SEQ_STATE_ESTABLISHED;
2000 rc = established_get_idx(seq, pos);
2001 }
2002
2003 return rc;
2004 }
2005
2006 static void *tcp_seek_last_pos(struct seq_file *seq)
2007 {
2008 struct tcp_iter_state *st = seq->private;
2009 int offset = st->offset;
2010 int orig_num = st->num;
2011 void *rc = NULL;
2012
2013 switch (st->state) {
2014 case TCP_SEQ_STATE_OPENREQ:
2015 case TCP_SEQ_STATE_LISTENING:
2016 if (st->bucket >= INET_LHTABLE_SIZE)
2017 break;
2018 st->state = TCP_SEQ_STATE_LISTENING;
2019 rc = listening_get_next(seq, NULL);
2020 while (offset-- && rc)
2021 rc = listening_get_next(seq, rc);
2022 if (rc)
2023 break;
2024 st->bucket = 0;
2025 st->state = TCP_SEQ_STATE_ESTABLISHED;
2026 /* Fallthrough */
2027 case TCP_SEQ_STATE_ESTABLISHED:
2028 if (st->bucket > tcp_hashinfo.ehash_mask)
2029 break;
2030 rc = established_get_first(seq);
2031 while (offset-- && rc)
2032 rc = established_get_next(seq, rc);
2033 }
2034
2035 st->num = orig_num;
2036
2037 return rc;
2038 }
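/*
 * tcp_seek_last_pos() lets a sequential reader resume from the
 * bucket/offset recorded by the previous read instead of rescanning
 * every hash chain from the start, which would make large dumps
 * quadratic.
 */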
2039
2040 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2041 {
2042 struct tcp_iter_state *st = seq->private;
2043 void *rc;
2044
2045 if (*pos && *pos == st->last_pos) {
2046 rc = tcp_seek_last_pos(seq);
2047 if (rc)
2048 goto out;
2049 }
2050
2051 st->state = TCP_SEQ_STATE_LISTENING;
2052 st->num = 0;
2053 st->bucket = 0;
2054 st->offset = 0;
2055 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2056
2057 out:
2058 st->last_pos = *pos;
2059 return rc;
2060 }
2061
2062 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2063 {
2064 struct tcp_iter_state *st = seq->private;
2065 void *rc = NULL;
2066
2067 if (v == SEQ_START_TOKEN) {
2068 rc = tcp_get_idx(seq, 0);
2069 goto out;
2070 }
2071
2072 switch (st->state) {
2073 case TCP_SEQ_STATE_OPENREQ:
2074 case TCP_SEQ_STATE_LISTENING:
2075 rc = listening_get_next(seq, v);
2076 if (!rc) {
2077 st->state = TCP_SEQ_STATE_ESTABLISHED;
2078 st->bucket = 0;
2079 st->offset = 0;
2080 rc = established_get_first(seq);
2081 }
2082 break;
2083 case TCP_SEQ_STATE_ESTABLISHED:
2084 rc = established_get_next(seq, v);
2085 break;
2086 }
2087 out:
2088 ++*pos;
2089 st->last_pos = *pos;
2090 return rc;
2091 }
2092
2093 static void tcp_seq_stop(struct seq_file *seq, void *v)
2094 {
2095 struct tcp_iter_state *st = seq->private;
2096
2097 switch (st->state) {
2098 case TCP_SEQ_STATE_OPENREQ:
2099 if (v) {
2100 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2101 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2102 }
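/* fall through - drop the listening bucket lock as well */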
2103 case TCP_SEQ_STATE_LISTENING:
2104 if (v != SEQ_START_TOKEN)
2105 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2106 break;
2107 case TCP_SEQ_STATE_ESTABLISHED:
2108 if (v)
2109 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2110 break;
2111 }
2112 }
2113
2114 int tcp_seq_open(struct inode *inode, struct file *file)
2115 {
2116 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2117 struct tcp_iter_state *s;
2118 int err;
2119
2120 err = seq_open_net(inode, file, &afinfo->seq_ops,
2121 sizeof(struct tcp_iter_state));
2122 if (err < 0)
2123 return err;
2124
2125 s = ((struct seq_file *)file->private_data)->private;
2126 s->family = afinfo->family;
2127 s->last_pos = 0;
2128 return 0;
2129 }
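/*
 * The family recorded here is what lets the same seq_ops back both
 * /proc/net/tcp (AF_INET) and /proc/net/tcp6 (AF_INET6): the iterators
 * above skip sockets whose sk_family does not match it.
 */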
2130 EXPORT_SYMBOL(tcp_seq_open);
2131
2132 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2133 {
2134 int rc = 0;
2135 struct proc_dir_entry *p;
2136
2137 afinfo->seq_ops.start = tcp_seq_start;
2138 afinfo->seq_ops.next = tcp_seq_next;
2139 afinfo->seq_ops.stop = tcp_seq_stop;
2140
2141 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2142 afinfo->seq_fops, afinfo);
2143 if (!p)
2144 rc = -ENOMEM;
2145 return rc;
2146 }
2147 EXPORT_SYMBOL(tcp_proc_register);
2148
2149 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2150 {
2151 remove_proc_entry(afinfo->name, net->proc_net);
2152 }
2153 EXPORT_SYMBOL(tcp_proc_unregister);
2154
2155 static void get_openreq4(const struct request_sock *req,
2156 struct seq_file *f, int i, kuid_t uid)
2157 {
2158 const struct inet_request_sock *ireq = inet_rsk(req);
2159 long delta = req->rsk_timer.expires - jiffies;
2160
2161 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2162 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2163 i,
2164 ireq->ir_loc_addr,
2165 ireq->ir_num,
2166 ireq->ir_rmt_addr,
2167 ntohs(ireq->ir_rmt_port),
2168 TCP_SYN_RECV,
2169 0, 0, /* could print option size, but that is af dependent. */
2170 1, /* timers active (only the expire timer) */
2171 jiffies_delta_to_clock_t(delta),
2172 req->num_timeout,
2173 from_kuid_munged(seq_user_ns(f), uid),
2174 0, /* non standard timer */
2175 0, /* open_requests have no inode */
2176 0,
2177 req);
2178 }
2179
2180 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2181 {
2182 int timer_active;
2183 unsigned long timer_expires;
2184 const struct tcp_sock *tp = tcp_sk(sk);
2185 const struct inet_connection_sock *icsk = inet_csk(sk);
2186 const struct inet_sock *inet = inet_sk(sk);
2187 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2188 __be32 dest = inet->inet_daddr;
2189 __be32 src = inet->inet_rcv_saddr;
2190 __u16 destp = ntohs(inet->inet_dport);
2191 __u16 srcp = ntohs(inet->inet_sport);
2192 int rx_queue;
2193
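/*
 * Timer codes reported below: 1 retransmit (including early retransmit
 * and tail loss probe), 4 zero window probe, 2 another timer pending on
 * sk_timer (e.g. keepalive), 0 none; get_timewait4_sock() reports 3 for
 * TIME_WAIT sockets.
 */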
2194 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2195 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2196 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2197 timer_active = 1;
2198 timer_expires = icsk->icsk_timeout;
2199 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2200 timer_active = 4;
2201 timer_expires = icsk->icsk_timeout;
2202 } else if (timer_pending(&sk->sk_timer)) {
2203 timer_active = 2;
2204 timer_expires = sk->sk_timer.expires;
2205 } else {
2206 timer_active = 0;
2207 timer_expires = jiffies;
2208 }
2209
2210 if (sk->sk_state == TCP_LISTEN)
2211 rx_queue = sk->sk_ack_backlog;
2212 else
2213 /*
2214 * Because we don't lock the socket, we might find a transient negative value.
2215 */
2216 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2217
2218 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2219 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2220 i, src, srcp, dest, destp, sk->sk_state,
2221 tp->write_seq - tp->snd_una,
2222 rx_queue,
2223 timer_active,
2224 jiffies_delta_to_clock_t(timer_expires - jiffies),
2225 icsk->icsk_retransmits,
2226 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2227 icsk->icsk_probes_out,
2228 sock_i_ino(sk),
2229 atomic_read(&sk->sk_refcnt), sk,
2230 jiffies_to_clock_t(icsk->icsk_rto),
2231 jiffies_to_clock_t(icsk->icsk_ack.ato),
2232 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2233 tp->snd_cwnd,
2234 sk->sk_state == TCP_LISTEN ?
2235 (fastopenq ? fastopenq->max_qlen : 0) :
2236 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2237 }
2238
2239 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2240 struct seq_file *f, int i)
2241 {
2242 long delta = tw->tw_timer.expires - jiffies;
2243 __be32 dest, src;
2244 __u16 destp, srcp;
2245
2246 dest = tw->tw_daddr;
2247 src = tw->tw_rcv_saddr;
2248 destp = ntohs(tw->tw_dport);
2249 srcp = ntohs(tw->tw_sport);
2250
2251 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2252 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2253 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2254 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2255 atomic_read(&tw->tw_refcnt), tw);
2256 }
2257
2258 #define TMPSZ 150
2259
2260 static int tcp4_seq_show(struct seq_file *seq, void *v)
2261 {
2262 struct tcp_iter_state *st;
2263 struct sock *sk = v;
2264
2265 seq_setwidth(seq, TMPSZ - 1);
2266 if (v == SEQ_START_TOKEN) {
2267 seq_puts(seq, " sl local_address rem_address st tx_queue "
2268 "rx_queue tr tm->when retrnsmt uid timeout "
2269 "inode");
2270 goto out;
2271 }
2272 st = seq->private;
2273
2274 switch (st->state) {
2275 case TCP_SEQ_STATE_LISTENING:
2276 case TCP_SEQ_STATE_ESTABLISHED:
2277 if (sk->sk_state == TCP_TIME_WAIT)
2278 get_timewait4_sock(v, seq, st->num);
2279 else
2280 get_tcp4_sock(v, seq, st->num);
2281 break;
2282 case TCP_SEQ_STATE_OPENREQ:
2283 get_openreq4(v, seq, st->num, st->uid);
2284 break;
2285 }
2286 out:
2287 seq_pad(seq, '\n');
2288 return 0;
2289 }
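/*
 * Illustrative /proc/net/tcp output (values are made up; widths follow
 * the formats above):
 *
 *  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 ffff88003d000000 100 0 0 10 0
 */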
2290
2291 static const struct file_operations tcp_afinfo_seq_fops = {
2292 .owner = THIS_MODULE,
2293 .open = tcp_seq_open,
2294 .read = seq_read,
2295 .llseek = seq_lseek,
2296 .release = seq_release_net
2297 };
2298
2299 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2300 .name = "tcp",
2301 .family = AF_INET,
2302 .seq_fops = &tcp_afinfo_seq_fops,
2303 .seq_ops = {
2304 .show = tcp4_seq_show,
2305 },
2306 };
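/*
 * Minimal userspace consumer of the format above - an illustrative
 * sketch, not kernel code; the sscanf pattern assumes the layout
 * printed by get_tcp4_sock():
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[512];
 *		unsigned int laddr, lport, raddr, rport, state;
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return 1;
 *		fgets(line, sizeof(line), f);	// skip the header line
 *		while (fgets(line, sizeof(line), f)) {
 *			if (sscanf(line, "%*d: %x:%x %x:%x %x",
 *				   &laddr, &lport, &raddr, &rport,
 *				   &state) == 5)
 *				printf("%08X:%04X -> %08X:%04X st %02X\n",
 *				       laddr, lport, raddr, rport, state);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */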
2307
2308 static int __net_init tcp4_proc_init_net(struct net *net)
2309 {
2310 return tcp_proc_register(net, &tcp4_seq_afinfo);
2311 }
2312
2313 static void __net_exit tcp4_proc_exit_net(struct net *net)
2314 {
2315 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2316 }
2317
2318 static struct pernet_operations tcp4_net_ops = {
2319 .init = tcp4_proc_init_net,
2320 .exit = tcp4_proc_exit_net,
2321 };
2322
2323 int __init tcp4_proc_init(void)
2324 {
2325 return register_pernet_subsys(&tcp4_net_ops);
2326 }
2327
2328 void tcp4_proc_exit(void)
2329 {
2330 unregister_pernet_subsys(&tcp4_net_ops);
2331 }
2332 #endif /* CONFIG_PROC_FS */
2333
2334 struct proto tcp_prot = {
2335 .name = "TCP",
2336 .owner = THIS_MODULE,
2337 .close = tcp_close,
2338 .connect = tcp_v4_connect,
2339 .disconnect = tcp_disconnect,
2340 .accept = inet_csk_accept,
2341 .ioctl = tcp_ioctl,
2342 .init = tcp_v4_init_sock,
2343 .destroy = tcp_v4_destroy_sock,
2344 .shutdown = tcp_shutdown,
2345 .setsockopt = tcp_setsockopt,
2346 .getsockopt = tcp_getsockopt,
2347 .recvmsg = tcp_recvmsg,
2348 .sendmsg = tcp_sendmsg,
2349 .sendpage = tcp_sendpage,
2350 .backlog_rcv = tcp_v4_do_rcv,
2351 .release_cb = tcp_release_cb,
2352 .hash = inet_hash,
2353 .unhash = inet_unhash,
2354 .get_port = inet_csk_get_port,
2355 .enter_memory_pressure = tcp_enter_memory_pressure,
2356 .stream_memory_free = tcp_stream_memory_free,
2357 .sockets_allocated = &tcp_sockets_allocated,
2358 .orphan_count = &tcp_orphan_count,
2359 .memory_allocated = &tcp_memory_allocated,
2360 .memory_pressure = &tcp_memory_pressure,
2361 .sysctl_mem = sysctl_tcp_mem,
2362 .sysctl_wmem = sysctl_tcp_wmem,
2363 .sysctl_rmem = sysctl_tcp_rmem,
2364 .max_header = MAX_TCP_HEADER,
2365 .obj_size = sizeof(struct tcp_sock),
2366 .slab_flags = SLAB_DESTROY_BY_RCU,
2367 .twsk_prot = &tcp_timewait_sock_ops,
2368 .rsk_prot = &tcp_request_sock_ops,
2369 .h.hashinfo = &tcp_hashinfo,
2370 .no_autobind = true,
2371 #ifdef CONFIG_COMPAT
2372 .compat_setsockopt = compat_tcp_setsockopt,
2373 .compat_getsockopt = compat_tcp_getsockopt,
2374 #endif
2375 #ifdef CONFIG_MEMCG_KMEM
2376 .init_cgroup = tcp_init_cgroup,
2377 .destroy_cgroup = tcp_destroy_cgroup,
2378 .proto_cgroup = tcp_proto_cgroup,
2379 #endif
2380 };
2381 EXPORT_SYMBOL(tcp_prot);
2382
2383 static void __net_exit tcp_sk_exit(struct net *net)
2384 {
2385 int cpu;
2386
2387 for_each_possible_cpu(cpu)
2388 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2389 free_percpu(net->ipv4.tcp_sk);
2390 }
2391
2392 static int __net_init tcp_sk_init(struct net *net)
2393 {
2394 int res, cpu;
2395
2396 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2397 if (!net->ipv4.tcp_sk)
2398 return -ENOMEM;
2399
2400 for_each_possible_cpu(cpu) {
2401 struct sock *sk;
2402
2403 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2404 IPPROTO_TCP, net);
2405 if (res)
2406 goto fail;
2407 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2408 }
2409
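/*
 * tcp_ecn == 2: answer incoming ECN setup requests, but do not request
 * ECN on outgoing connections; tcp_ecn_fallback == 1: retry without ECN
 * if ECN setup appears to fail.
 */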
2410 net->ipv4.sysctl_tcp_ecn = 2;
2411 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2412
2413 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2414 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2415 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2416
2417 return 0;
2418 fail:
2419 tcp_sk_exit(net);
2420
2421 return res;
2422 }
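/*
 * The per-cpu control sockets allocated above are what
 * tcp_v4_send_reset() and tcp_v4_send_ack() transmit through when no
 * full socket exists for an incoming segment.
 */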
2423
2424 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2425 {
2426 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2427 }
2428
2429 static struct pernet_operations __net_initdata tcp_sk_ops = {
2430 .init = tcp_sk_init,
2431 .exit = tcp_sk_exit,
2432 .exit_batch = tcp_sk_exit_batch,
2433 };
2434
2435 void __init tcp_v4_init(void)
2436 {
2437 inet_hashinfo_init(&tcp_hashinfo);
2438 if (register_pernet_subsys(&tcp_sk_ops))
2439 panic("Failed to create the TCP control socket.\n");
2440 }