/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *	See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
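
/* Editorial note, a sketch of the scheme used above (assuming the
 * RFC 6528-style implementation behind secure_tcp_sequence_number() in
 * net/core/secure_seq.c): the initial sequence number mixes the
 * connection 4-tuple with per-boot secret material plus a clock
 * component, so ISNs are hard for an off-path attacker to guess while
 * still advancing monotonically for a given connection.
 */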

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

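/* Editorial note, a hedged usage sketch (shell, not part of this file):
 * the sysctl_tcp_tw_reuse knob tested above corresponds to
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse
 *
 * and only takes effect for outgoing connections whose TIME-WAIT
 * timestamps satisfy the one-second freshness check in tcp_twsk_unique().
 */
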
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
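
/* Editorial note, a minimal userspace sketch (assumed values, standard
 * socket API) of how control reaches tcp_v4_connect() above:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * connect(2) lands in inet_stream_connect(), which calls
 * sk->sk_prot->connect, i.e. this function for IPv4 TCP sockets.
 */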

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
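
/* Editorial note, a worked example (assumed numbers) of the path above:
 * if icsk_pmtu_cookie is 1500 and an ICMP_FRAG_NEEDED quotes an MTU of
 * 1400, tcp_sync_mss() shrinks the MSS to roughly 1400 minus the IP and
 * TCP header bytes, and tcp_simple_retransmit() resends the now
 * too-large segments without waiting for the RTO timer.
 */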

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	WARN_ON(req->sk);

	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		reqsk_put(req);
	} else {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
	}
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

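/* Editorial note on __tcp_v4_send_check() above: for CHECKSUM_PARTIAL
 * only the pseudo-header sum is stored (bit-inverted) in th->check, and
 * csum_start/csum_offset tell the offloading NIC where to fold in the
 * rest; otherwise the full checksum over the header and the accumulated
 * skb->csum is computed in software right here.
 */
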
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not loosen security here:
		 * Incoming packet is checked with md5 hash with the found key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

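/* Editorial note: like tcp_v4_send_reset(), tcp_v4_send_ack() above
 * builds its reply purely from the incoming skb (addresses and ports
 * swapped, options rebuilt), which is what lets it serve TIME-WAIT and
 * SYN-RECV "sockets" that have no full transmit state of their own.
 */
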
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}


#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
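
/* Editorial note, a hedged userspace sketch (assumed peer address and
 * key) of how keys reach tcp_md5_do_add()/tcp_md5_do_del():
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key instead; both paths go through
 * tcp_v4_parse_md5_keys() below.
 */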

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(struct sock *sk,
				    const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}
#endif

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
	if (req) {
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk || nsk == sk)
			reqsk_put(req);
		return nsk;
	}

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
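
/* Editorial note: tcp_v4_do_rcv() above has three outcomes worth naming.
 * ESTABLISHED sockets take the fast path via tcp_rcv_established();
 * LISTEN sockets may spawn a child through tcp_v4_hnd_req() and
 * tcp_child_process(); everything else runs the generic state machine in
 * tcp_rcv_state_process(), with a RST sent when processing fails.
 */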

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
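
/* Editorial note: early demux runs from the IP input path before
 * routing; when it finds an established socket it can attach the
 * socket's cached rx dst to the skb and skip a full route lookup for
 * this packet, which is the entire point of the exercise.
 */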

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
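
/* Editorial note (an interpretation, not from the original comments):
 * the (3 * tcp_rto_min(sk)) / 4 delayed-ACK timer armed above acts as a
 * safety net, so that if the reader never drains the prequeue the
 * deferred ACK still goes out before the peer's retransmission timer is
 * likely to fire.
 */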

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
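
/* Editorial note: ipv4_specific is the icsk_af_ops table installed by
 * tcp_v4_init_sock() below; dual-stack IPv6 sockets swap in an
 * equivalent table from net/ipv6/tcp_ipv6.c, which is how the shared
 * TCP core stays address-family agnostic.
 */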

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket, following cur. If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
1898
1899 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1900 {
1901 struct tcp_iter_state *st = seq->private;
1902 void *rc;
1903
1904 st->bucket = 0;
1905 st->offset = 0;
1906 rc = listening_get_next(seq, NULL);
1907
1908 while (rc && *pos) {
1909 rc = listening_get_next(seq, rc);
1910 --*pos;
1911 }
1912 return rc;
1913 }
1914
1915 static inline bool empty_bucket(const struct tcp_iter_state *st)
1916 {
1917 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1918 }
1919
1920 /*
1921 * Get first established socket starting from bucket given in st->bucket.
1922 * If st->bucket is zero, the very first socket in the hash is returned.
1923 */
1924 static void *established_get_first(struct seq_file *seq)
1925 {
1926 struct tcp_iter_state *st = seq->private;
1927 struct net *net = seq_file_net(seq);
1928 void *rc = NULL;
1929
1930 st->offset = 0;
1931 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1932 struct sock *sk;
1933 struct hlist_nulls_node *node;
1934 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1935
1936 /* Lockless fast path for the common case of empty buckets */
1937 if (empty_bucket(st))
1938 continue;
1939
1940 spin_lock_bh(lock);
1941 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1942 if (sk->sk_family != st->family ||
1943 !net_eq(sock_net(sk), net)) {
1944 continue;
1945 }
1946 rc = sk;
1947 goto out;
1948 }
1949 spin_unlock_bh(lock);
1950 }
1951 out:
1952 return rc;
1953 }
1954
1955 static void *established_get_next(struct seq_file *seq, void *cur)
1956 {
1957 struct sock *sk = cur;
1958 struct hlist_nulls_node *node;
1959 struct tcp_iter_state *st = seq->private;
1960 struct net *net = seq_file_net(seq);
1961
1962 ++st->num;
1963 ++st->offset;
1964
1965 sk = sk_nulls_next(sk);
1966
1967 sk_nulls_for_each_from(sk, node) {
1968 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1969 return sk;
1970 }
1971
1972 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1973 ++st->bucket;
1974 return established_get_first(seq);
1975 }
1976
1977 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1978 {
1979 struct tcp_iter_state *st = seq->private;
1980 void *rc;
1981
1982 st->bucket = 0;
1983 rc = established_get_first(seq);
1984
1985 while (rc && pos) {
1986 rc = established_get_next(seq, rc);
1987 --pos;
1988 }
1989 return rc;
1990 }
1991
1992 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1993 {
1994 void *rc;
1995 struct tcp_iter_state *st = seq->private;
1996
1997 st->state = TCP_SEQ_STATE_LISTENING;
1998 rc = listening_get_idx(seq, &pos);
1999
2000 if (!rc) {
2001 st->state = TCP_SEQ_STATE_ESTABLISHED;
2002 rc = established_get_idx(seq, pos);
2003 }
2004
2005 return rc;
2006 }
2007
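/*
 * seq_file can invoke start() many times during a single open(), e.g.
 * when userspace reads the file in small chunks.  If we are asked for
 * the position we stopped at last time, resume from the cached bucket
 * and replay at most st->offset entries within it instead of
 * rescanning the tables from bucket 0.
 */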
2008 static void *tcp_seek_last_pos(struct seq_file *seq)
2009 {
2010 struct tcp_iter_state *st = seq->private;
2011 int offset = st->offset;
2012 int orig_num = st->num;
2013 void *rc = NULL;
2014
2015 switch (st->state) {
2016 case TCP_SEQ_STATE_OPENREQ:
2017 case TCP_SEQ_STATE_LISTENING:
2018 if (st->bucket >= INET_LHTABLE_SIZE)
2019 break;
2020 st->state = TCP_SEQ_STATE_LISTENING;
2021 rc = listening_get_next(seq, NULL);
2022 while (offset-- && rc)
2023 rc = listening_get_next(seq, rc);
2024 if (rc)
2025 break;
2026 st->bucket = 0;
2027 st->state = TCP_SEQ_STATE_ESTABLISHED;
2028 /* Fallthrough */
2029 case TCP_SEQ_STATE_ESTABLISHED:
2030 if (st->bucket > tcp_hashinfo.ehash_mask)
2031 break;
2032 rc = established_get_first(seq);
2033 while (offset-- && rc)
2034 rc = established_get_next(seq, rc);
2035 }
2036
2037 st->num = orig_num;
2038
2039 return rc;
2040 }
2041
2042 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2043 {
2044 struct tcp_iter_state *st = seq->private;
2045 void *rc;
2046
2047 if (*pos && *pos == st->last_pos) {
2048 rc = tcp_seek_last_pos(seq);
2049 if (rc)
2050 goto out;
2051 }
2052
2053 st->state = TCP_SEQ_STATE_LISTENING;
2054 st->num = 0;
2055 st->bucket = 0;
2056 st->offset = 0;
2057 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2058
2059 out:
2060 st->last_pos = *pos;
2061 return rc;
2062 }
2063
2064 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2065 {
2066 struct tcp_iter_state *st = seq->private;
2067 void *rc = NULL;
2068
2069 if (v == SEQ_START_TOKEN) {
2070 rc = tcp_get_idx(seq, 0);
2071 goto out;
2072 }
2073
2074 switch (st->state) {
2075 case TCP_SEQ_STATE_OPENREQ:
2076 case TCP_SEQ_STATE_LISTENING:
2077 rc = listening_get_next(seq, v);
2078 if (!rc) {
2079 st->state = TCP_SEQ_STATE_ESTABLISHED;
2080 st->bucket = 0;
2081 st->offset = 0;
2082 rc = established_get_first(seq);
2083 }
2084 break;
2085 case TCP_SEQ_STATE_ESTABLISHED:
2086 rc = established_get_next(seq, v);
2087 break;
2088 }
2089 out:
2090 ++*pos;
2091 st->last_pos = *pos;
2092 return rc;
2093 }
2094
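/*
 * Release whatever lock the iterator still holds: the syn_wait_lock
 * (and, by fallthrough, the listening bucket lock) while walking open
 * requests, the listening bucket lock alone for listeners, or the
 * ehash bucket lock for established/TIME_WAIT sockets.
 */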
2095 static void tcp_seq_stop(struct seq_file *seq, void *v)
2096 {
2097 struct tcp_iter_state *st = seq->private;
2098
2099 switch (st->state) {
2100 case TCP_SEQ_STATE_OPENREQ:
2101 if (v) {
2102 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2103 spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2104 }
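/* Fallthrough */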
2105 case TCP_SEQ_STATE_LISTENING:
2106 if (v != SEQ_START_TOKEN)
2107 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2108 break;
2109 case TCP_SEQ_STATE_ESTABLISHED:
2110 if (v)
2111 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2112 break;
2113 }
2114 }
2115
2116 int tcp_seq_open(struct inode *inode, struct file *file)
2117 {
2118 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2119 struct tcp_iter_state *s;
2120 int err;
2121
2122 err = seq_open_net(inode, file, &afinfo->seq_ops,
2123 sizeof(struct tcp_iter_state));
2124 if (err < 0)
2125 return err;
2126
2127 s = ((struct seq_file *)file->private_data)->private;
2128 s->family = afinfo->family;
2129 s->last_pos = 0;
2130 return 0;
2131 }
2132 EXPORT_SYMBOL(tcp_seq_open);
2133
2134 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2135 {
2136 int rc = 0;
2137 struct proc_dir_entry *p;
2138
2139 afinfo->seq_ops.start = tcp_seq_start;
2140 afinfo->seq_ops.next = tcp_seq_next;
2141 afinfo->seq_ops.stop = tcp_seq_stop;
2142
2143 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2144 afinfo->seq_fops, afinfo);
2145 if (!p)
2146 rc = -ENOMEM;
2147 return rc;
2148 }
2149 EXPORT_SYMBOL(tcp_proc_register);
2150
2151 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2152 {
2153 remove_proc_entry(afinfo->name, net->proc_net);
2154 }
2155 EXPORT_SYMBOL(tcp_proc_unregister);
2156
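/*
 * Format one open request (SYN_RECV) as a /proc/net/tcp record.  The
 * layout matches get_tcp4_sock() below; the state is always
 * TCP_SYN_RECV and only the SYN-ACK retransmit timer can be pending.
 * A row might look like this (hypothetical values):
 *
 *   0: 0100007F:1F90 0200007F:D2A4 03 00000000:00000000 01:00000014 00000000  1000        0 0 0 ffff88003d3af3c0
 */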
2157 static void get_openreq4(const struct request_sock *req,
2158 struct seq_file *f, int i, kuid_t uid)
2159 {
2160 const struct inet_request_sock *ireq = inet_rsk(req);
2161 long delta = req->rsk_timer.expires - jiffies;
2162
2163 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2164 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2165 i,
2166 ireq->ir_loc_addr,
2167 ireq->ir_num,
2168 ireq->ir_rmt_addr,
2169 ntohs(ireq->ir_rmt_port),
2170 TCP_SYN_RECV,
2171 0, 0, /* could print option size, but that is af dependent. */
2172 1, /* timers active (only the expire timer) */
2173 jiffies_delta_to_clock_t(delta),
2174 req->num_timeout,
2175 from_kuid_munged(seq_user_ns(f), uid),
2176 0, /* non-standard timer */
2177 0, /* open_requests have no inode */
2178 0,
2179 req);
2180 }
2181
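/*
 * Format one full socket as a /proc/net/tcp record.  The "tr" field
 * encodes which timer is pending:
 *
 *   0  no timer armed
 *   1  retransmit (also early retransmit and tail loss probe)
 *   2  keepalive (sk_timer)
 *   3  TIME_WAIT, see get_timewait4_sock()
 *   4  zero window probe
 */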
2182 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2183 {
2184 int timer_active;
2185 unsigned long timer_expires;
2186 const struct tcp_sock *tp = tcp_sk(sk);
2187 const struct inet_connection_sock *icsk = inet_csk(sk);
2188 const struct inet_sock *inet = inet_sk(sk);
2189 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2190 __be32 dest = inet->inet_daddr;
2191 __be32 src = inet->inet_rcv_saddr;
2192 __u16 destp = ntohs(inet->inet_dport);
2193 __u16 srcp = ntohs(inet->inet_sport);
2194 int rx_queue;
2195
2196 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2197 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2198 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2199 timer_active = 1;
2200 timer_expires = icsk->icsk_timeout;
2201 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2202 timer_active = 4;
2203 timer_expires = icsk->icsk_timeout;
2204 } else if (timer_pending(&sk->sk_timer)) {
2205 timer_active = 2;
2206 timer_expires = sk->sk_timer.expires;
2207 } else {
2208 timer_active = 0;
2209 timer_expires = jiffies;
2210 }
2211
2212 if (sk->sk_state == TCP_LISTEN)
2213 rx_queue = sk->sk_ack_backlog;
2214 else
2215 /*
2216  * Because we don't lock the socket, we might find a transient negative value.
2217  */
2218 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2219
2220 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2221 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2222 i, src, srcp, dest, destp, sk->sk_state,
2223 tp->write_seq - tp->snd_una,
2224 rx_queue,
2225 timer_active,
2226 jiffies_delta_to_clock_t(timer_expires - jiffies),
2227 icsk->icsk_retransmits,
2228 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2229 icsk->icsk_probes_out,
2230 sock_i_ino(sk),
2231 atomic_read(&sk->sk_refcnt), sk,
2232 jiffies_to_clock_t(icsk->icsk_rto),
2233 jiffies_to_clock_t(icsk->icsk_ack.ato),
2234 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2235 tp->snd_cwnd,
2236 sk->sk_state == TCP_LISTEN ?
2237 fastopenq->max_qlen :
2238 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2239 }
2240
2241 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2242 struct seq_file *f, int i)
2243 {
2244 long delta = tw->tw_timer.expires - jiffies;
2245 __be32 dest, src;
2246 __u16 destp, srcp;
2247
2248 dest = tw->tw_daddr;
2249 src = tw->tw_rcv_saddr;
2250 destp = ntohs(tw->tw_dport);
2251 srcp = ntohs(tw->tw_sport);
2252
2253 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2254 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2255 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2256 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2257 atomic_read(&tw->tw_refcnt), tw);
2258 }
2259
2260 #define TMPSZ 150
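/*
 * Every record, including the header, is space-padded to exactly
 * TMPSZ - 1 characters plus the trailing '\n' (see seq_setwidth() and
 * seq_pad() in tcp4_seq_show() below), so readers see fixed-length
 * lines.
 */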
2261
2262 static int tcp4_seq_show(struct seq_file *seq, void *v)
2263 {
2264 struct tcp_iter_state *st;
2265 struct sock *sk = v;
2266
2267 seq_setwidth(seq, TMPSZ - 1);
2268 if (v == SEQ_START_TOKEN) {
2269 seq_puts(seq, " sl local_address rem_address st tx_queue "
2270 "rx_queue tr tm->when retrnsmt uid timeout "
2271 "inode");
2272 goto out;
2273 }
2274 st = seq->private;
2275
2276 switch (st->state) {
2277 case TCP_SEQ_STATE_LISTENING:
2278 case TCP_SEQ_STATE_ESTABLISHED:
2279 if (sk->sk_state == TCP_TIME_WAIT)
2280 get_timewait4_sock(v, seq, st->num);
2281 else
2282 get_tcp4_sock(v, seq, st->num);
2283 break;
2284 case TCP_SEQ_STATE_OPENREQ:
2285 get_openreq4(v, seq, st->num, st->uid);
2286 break;
2287 }
2288 out:
2289 seq_pad(seq, '\n');
2290 return 0;
2291 }
2292
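/*
 * The resulting file behaves like any other seq_file: each fgets()
 * returns one fixed-width record.  A minimal userspace reader is just
 * the following sketch (error handling omitted; parse_record() stands
 * in for whatever the caller does with a row):
 *
 *	FILE *f = fopen("/proc/net/tcp", "r");
 *	char line[256];
 *
 *	while (fgets(line, sizeof(line), f))
 *		parse_record(line);
 *	fclose(f);
 */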
2293 static const struct file_operations tcp_afinfo_seq_fops = {
2294 .owner = THIS_MODULE,
2295 .open = tcp_seq_open,
2296 .read = seq_read,
2297 .llseek = seq_lseek,
2298 .release = seq_release_net
2299 };
2300
2301 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2302 .name = "tcp",
2303 .family = AF_INET,
2304 .seq_fops = &tcp_afinfo_seq_fops,
2305 .seq_ops = {
2306 .show = tcp4_seq_show,
2307 },
2308 };
2309
2310 static int __net_init tcp4_proc_init_net(struct net *net)
2311 {
2312 return tcp_proc_register(net, &tcp4_seq_afinfo);
2313 }
2314
2315 static void __net_exit tcp4_proc_exit_net(struct net *net)
2316 {
2317 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2318 }
2319
2320 static struct pernet_operations tcp4_net_ops = {
2321 .init = tcp4_proc_init_net,
2322 .exit = tcp4_proc_exit_net,
2323 };
2324
2325 int __init tcp4_proc_init(void)
2326 {
2327 return register_pernet_subsys(&tcp4_net_ops);
2328 }
2329
2330 void tcp4_proc_exit(void)
2331 {
2332 unregister_pernet_subsys(&tcp4_net_ops);
2333 }
2334 #endif /* CONFIG_PROC_FS */
2335
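/*
 * Method table binding the generic AF_INET stream-socket calls to their
 * TCP implementations: connect(2) ends up in tcp_v4_connect(),
 * accept(2) in inet_csk_accept(), and so on.
 */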
2336 struct proto tcp_prot = {
2337 .name = "TCP",
2338 .owner = THIS_MODULE,
2339 .close = tcp_close,
2340 .connect = tcp_v4_connect,
2341 .disconnect = tcp_disconnect,
2342 .accept = inet_csk_accept,
2343 .ioctl = tcp_ioctl,
2344 .init = tcp_v4_init_sock,
2345 .destroy = tcp_v4_destroy_sock,
2346 .shutdown = tcp_shutdown,
2347 .setsockopt = tcp_setsockopt,
2348 .getsockopt = tcp_getsockopt,
2349 .recvmsg = tcp_recvmsg,
2350 .sendmsg = tcp_sendmsg,
2351 .sendpage = tcp_sendpage,
2352 .backlog_rcv = tcp_v4_do_rcv,
2353 .release_cb = tcp_release_cb,
2354 .hash = inet_hash,
2355 .unhash = inet_unhash,
2356 .get_port = inet_csk_get_port,
2357 .enter_memory_pressure = tcp_enter_memory_pressure,
2358 .stream_memory_free = tcp_stream_memory_free,
2359 .sockets_allocated = &tcp_sockets_allocated,
2360 .orphan_count = &tcp_orphan_count,
2361 .memory_allocated = &tcp_memory_allocated,
2362 .memory_pressure = &tcp_memory_pressure,
2363 .sysctl_mem = sysctl_tcp_mem,
2364 .sysctl_wmem = sysctl_tcp_wmem,
2365 .sysctl_rmem = sysctl_tcp_rmem,
2366 .max_header = MAX_TCP_HEADER,
2367 .obj_size = sizeof(struct tcp_sock),
2368 .slab_flags = SLAB_DESTROY_BY_RCU,
2369 .twsk_prot = &tcp_timewait_sock_ops,
2370 .rsk_prot = &tcp_request_sock_ops,
2371 .h.hashinfo = &tcp_hashinfo,
2372 .no_autobind = true,
2373 #ifdef CONFIG_COMPAT
2374 .compat_setsockopt = compat_tcp_setsockopt,
2375 .compat_getsockopt = compat_tcp_getsockopt,
2376 #endif
2377 #ifdef CONFIG_MEMCG_KMEM
2378 .init_cgroup = tcp_init_cgroup,
2379 .destroy_cgroup = tcp_destroy_cgroup,
2380 .proto_cgroup = tcp_proto_cgroup,
2381 #endif
2382 };
2383 EXPORT_SYMBOL(tcp_prot);
2384
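/*
 * Every network namespace keeps one control socket per possible CPU in
 * net->ipv4.tcp_sk.  These are used to transmit packets on behalf of
 * sockets we do not own, such as RSTs and ACKs sent without an owning
 * full socket.
 */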
2385 static void __net_exit tcp_sk_exit(struct net *net)
2386 {
2387 int cpu;
2388
2389 for_each_possible_cpu(cpu)
2390 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2391 free_percpu(net->ipv4.tcp_sk);
2392 }
2393
2394 static int __net_init tcp_sk_init(struct net *net)
2395 {
2396 int res, cpu;
2397
2398 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2399 if (!net->ipv4.tcp_sk)
2400 return -ENOMEM;
2401
2402 for_each_possible_cpu(cpu) {
2403 struct sock *sk;
2404
2405 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2406 IPPROTO_TCP, net);
2407 if (res)
2408 goto fail;
2409 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2410 }
2411
2412 net->ipv4.sysctl_tcp_ecn = 2;
2413 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2414
2415 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2416 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2417 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2418
2419 return 0;
2420 fail:
2421 tcp_sk_exit(net);
2422
2423 return res;
2424 }
2425
2426 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2427 {
2428 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2429 }
2430
2431 static struct pernet_operations __net_initdata tcp_sk_ops = {
2432 .init = tcp_sk_init,
2433 .exit = tcp_sk_exit,
2434 .exit_batch = tcp_sk_exit_batch,
2435 };
2436
2437 void __init tcp_v4_init(void)
2438 {
2439 inet_hashinfo_init(&tcp_hashinfo);
2440 if (register_pernet_subsys(&tcp_sk_ops))
2441 panic("Failed to create the TCP control socket.\n");
2442 }