tcp: better TCP_SKB_CB layout to reduce cache line misses
net/ipv4/tcp_ipv4.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78 #include <net/busy_poll.h>
79
80 #include <linux/inet.h>
81 #include <linux/ipv6.h>
82 #include <linux/stddef.h>
83 #include <linux/proc_fs.h>
84 #include <linux/seq_file.h>
85
86 #include <linux/crypto.h>
87 #include <linux/scatterlist.h>
88
89 int sysctl_tcp_tw_reuse __read_mostly;
90 int sysctl_tcp_low_latency __read_mostly;
91 EXPORT_SYMBOL(sysctl_tcp_low_latency);
92
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100
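/* Derive the initial sequence number for this connection from its
 * 4-tuple, using the keyed hash in secure_tcp_sequence_number().
 */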
101 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 ip_hdr(skb)->saddr,
105 tcp_hdr(skb)->dest,
106 tcp_hdr(skb)->source);
107 }
108
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
113
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117
118 Actually, the idea is close to VJ's, except that the timestamp cache is
119 held not per host but per port pair, and the TW bucket is used as the
120 state holder.
121
122 If the TW bucket has already been destroyed we fall back to VJ's scheme
123 and use the initial timestamp retrieved from the peer table.
124 */
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
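/* Start write_seq well beyond the old TIME-WAIT socket's snd_nxt so that
 * segments from the previous incarnation cannot be mistaken for ours.
 */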
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 sock_hold(sktw);
134 return 1;
135 }
136
137 return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 {
144 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 struct inet_sock *inet = inet_sk(sk);
146 struct tcp_sock *tp = tcp_sk(sk);
147 __be16 orig_sport, orig_dport;
148 __be32 daddr, nexthop;
149 struct flowi4 *fl4;
150 struct rtable *rt;
151 int err;
152 struct ip_options_rcu *inet_opt;
153
154 if (addr_len < sizeof(struct sockaddr_in))
155 return -EINVAL;
156
157 if (usin->sin_family != AF_INET)
158 return -EAFNOSUPPORT;
159
160 nexthop = daddr = usin->sin_addr.s_addr;
161 inet_opt = rcu_dereference_protected(inet->inet_opt,
162 sock_owned_by_user(sk));
163 if (inet_opt && inet_opt->opt.srr) {
164 if (!daddr)
165 return -EINVAL;
166 nexthop = inet_opt->opt.faddr;
167 }
168
169 orig_sport = inet->inet_sport;
170 orig_dport = usin->sin_port;
171 fl4 = &inet->cork.fl.u.ip4;
172 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 IPPROTO_TCP,
175 orig_sport, orig_dport, sk);
176 if (IS_ERR(rt)) {
177 err = PTR_ERR(rt);
178 if (err == -ENETUNREACH)
179 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180 return err;
181 }
182
183 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
184 ip_rt_put(rt);
185 return -ENETUNREACH;
186 }
187
188 if (!inet_opt || !inet_opt->opt.srr)
189 daddr = fl4->daddr;
190
191 if (!inet->inet_saddr)
192 inet->inet_saddr = fl4->saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr;
194
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
196 /* Reset inherited state */
197 tp->rx_opt.ts_recent = 0;
198 tp->rx_opt.ts_recent_stamp = 0;
199 if (likely(!tp->repair))
200 tp->write_seq = 0;
201 }
202
203 if (tcp_death_row.sysctl_tw_recycle &&
204 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
205 tcp_fetch_timewait_stamp(sk, &rt->dst);
206
207 inet->inet_dport = usin->sin_port;
208 inet->inet_daddr = daddr;
209
210 inet_set_txhash(sk);
211
212 inet_csk(sk)->icsk_ext_hdr_len = 0;
213 if (inet_opt)
214 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
215
216 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
217
218 /* Socket identity is still unknown (sport may be zero).
219 * However we set state to SYN-SENT and, without releasing the socket
220 * lock, select a source port, enter ourselves into the hash tables and
221 * complete initialization after this.
222 */
223 tcp_set_state(sk, TCP_SYN_SENT);
224 err = inet_hash_connect(&tcp_death_row, sk);
225 if (err)
226 goto failure;
227
228 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
229 inet->inet_sport, inet->inet_dport, sk);
230 if (IS_ERR(rt)) {
231 err = PTR_ERR(rt);
232 rt = NULL;
233 goto failure;
234 }
235 /* OK, now commit destination to socket. */
236 sk->sk_gso_type = SKB_GSO_TCPV4;
237 sk_setup_caps(sk, &rt->dst);
238
239 if (!tp->write_seq && likely(!tp->repair))
240 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
241 inet->inet_daddr,
242 inet->inet_sport,
243 usin->sin_port);
244
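/* Seed this socket's IP identification counter from the initial
 * sequence number mixed with jiffies.
 */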
245 inet->inet_id = tp->write_seq ^ jiffies;
246
247 err = tcp_connect(sk);
248
249 rt = NULL;
250 if (err)
251 goto failure;
252
253 return 0;
254
255 failure:
256 /*
257 * This unhashes the socket and releases the local port,
258 * if necessary.
259 */
260 tcp_set_state(sk, TCP_CLOSE);
261 ip_rt_put(rt);
262 sk->sk_route_caps = 0;
263 inet->inet_dport = 0;
264 return err;
265 }
266 EXPORT_SYMBOL(tcp_v4_connect);
267
268 /*
269 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
270 * It can be called through tcp_release_cb() if socket was owned by user
271 * at the time tcp_v4_err() was called to handle ICMP message.
272 */
273 void tcp_v4_mtu_reduced(struct sock *sk)
274 {
275 struct dst_entry *dst;
276 struct inet_sock *inet = inet_sk(sk);
277 u32 mtu = tcp_sk(sk)->mtu_info;
278
279 dst = inet_csk_update_pmtu(sk, mtu);
280 if (!dst)
281 return;
282
283 /* Something is about to go wrong... Remember the soft error
284 * in case this connection is not able to recover.
285 */
286 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
287 sk->sk_err_soft = EMSGSIZE;
288
289 mtu = dst_mtu(dst);
290
291 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
292 ip_sk_accept_pmtu(sk) &&
293 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
294 tcp_sync_mss(sk, mtu);
295
296 /* Resend the TCP packet because it's
297 * clear that the old packet has been
298 * dropped. This is the new "fast" path mtu
299 * discovery.
300 */
301 tcp_simple_retransmit(sk);
302 } /* else let the usual retransmit timer handle it */
303 }
304 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
305
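/* Handle an ICMP redirect: revalidate the cached route for this socket
 * and let the dst's redirect handler update it.
 */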
306 static void do_redirect(struct sk_buff *skb, struct sock *sk)
307 {
308 struct dst_entry *dst = __sk_dst_check(sk, 0);
309
310 if (dst)
311 dst->ops->redirect(dst, sk, skb);
312 }
313
314 /*
315 * This routine is called by the ICMP module when it gets some
316 * sort of error condition. If err < 0 then the socket should
317 * be closed and the error returned to the user. If err > 0
318 * it's just the icmp type << 8 | icmp code. After adjustment
319 * header points to the first 8 bytes of the tcp header. We need
320 * to find the appropriate port.
321 *
322 * The locking strategy used here is very "optimistic". When
323 * someone else accesses the socket the ICMP is just dropped
324 * and for some paths there is no check at all.
325 * A more general error queue to queue errors for later handling
326 * is probably better.
327 *
328 */
329
330 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
331 {
332 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
333 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
334 struct inet_connection_sock *icsk;
335 struct tcp_sock *tp;
336 struct inet_sock *inet;
337 const int type = icmp_hdr(icmp_skb)->type;
338 const int code = icmp_hdr(icmp_skb)->code;
339 struct sock *sk;
340 struct sk_buff *skb;
341 struct request_sock *fastopen;
342 __u32 seq, snd_una;
343 __u32 remaining;
344 int err;
345 struct net *net = dev_net(icmp_skb->dev);
346
347 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
348 iph->saddr, th->source, inet_iif(icmp_skb));
349 if (!sk) {
350 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
351 return;
352 }
353 if (sk->sk_state == TCP_TIME_WAIT) {
354 inet_twsk_put(inet_twsk(sk));
355 return;
356 }
357
358 bh_lock_sock(sk);
359 /* If too many ICMPs get dropped on busy
360 * servers this needs to be solved differently.
361 * We do take care of the PMTU discovery (RFC1191) special case:
362 * we can receive locally generated ICMP messages while socket is held.
363 */
364 if (sock_owned_by_user(sk)) {
365 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
366 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
367 }
368 if (sk->sk_state == TCP_CLOSE)
369 goto out;
370
371 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
372 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
373 goto out;
374 }
375
376 icsk = inet_csk(sk);
377 tp = tcp_sk(sk);
378 seq = ntohl(th->seq);
379 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
380 fastopen = tp->fastopen_rsk;
381 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
382 if (sk->sk_state != TCP_LISTEN &&
383 !between(seq, snd_una, tp->snd_nxt)) {
384 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
385 goto out;
386 }
387
388 switch (type) {
389 case ICMP_REDIRECT:
390 do_redirect(icmp_skb, sk);
391 goto out;
392 case ICMP_SOURCE_QUENCH:
393 /* Just silently ignore these. */
394 goto out;
395 case ICMP_PARAMETERPROB:
396 err = EPROTO;
397 break;
398 case ICMP_DEST_UNREACH:
399 if (code > NR_ICMP_UNREACH)
400 goto out;
401
402 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
403 /* We are not interested in TCP_LISTEN and open_requests
404 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
405 * they should go through unfragmented).
406 */
407 if (sk->sk_state == TCP_LISTEN)
408 goto out;
409
410 tp->mtu_info = info;
411 if (!sock_owned_by_user(sk)) {
412 tcp_v4_mtu_reduced(sk);
413 } else {
414 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
415 sock_hold(sk);
416 }
417 goto out;
418 }
419
420 err = icmp_err_convert[code].errno;
421 /* check if icmp_skb allows revert of backoff
422 * (see draft-zimmermann-tcp-lcd) */
423 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
424 break;
425 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
426 !icsk->icsk_backoff || fastopen)
427 break;
428
429 if (sock_owned_by_user(sk))
430 break;
431
432 icsk->icsk_backoff--;
433 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
434 TCP_TIMEOUT_INIT;
435 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
436
437 skb = tcp_write_queue_head(sk);
438 BUG_ON(!skb);
439
440 remaining = icsk->icsk_rto -
441 min(icsk->icsk_rto,
442 tcp_time_stamp - tcp_skb_timestamp(skb));
443
444 if (remaining) {
445 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
446 remaining, TCP_RTO_MAX);
447 } else {
448 /* RTO revert clocked out retransmission.
449 * Will retransmit now */
450 tcp_retransmit_timer(sk);
451 }
452
453 break;
454 case ICMP_TIME_EXCEEDED:
455 err = EHOSTUNREACH;
456 break;
457 default:
458 goto out;
459 }
460
461 switch (sk->sk_state) {
462 struct request_sock *req, **prev;
463 case TCP_LISTEN:
464 if (sock_owned_by_user(sk))
465 goto out;
466
467 req = inet_csk_search_req(sk, &prev, th->dest,
468 iph->daddr, iph->saddr);
469 if (!req)
470 goto out;
471
472 /* ICMPs are not backlogged, hence we cannot get
473 an established socket here.
474 */
475 WARN_ON(req->sk);
476
477 if (seq != tcp_rsk(req)->snt_isn) {
478 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
479 goto out;
480 }
481
482 /*
483 * Still in SYN_RECV, just remove it silently.
484 * There is no good way to pass the error to the newly
485 * created socket, and POSIX does not want network
486 * errors returned from accept().
487 */
488 inet_csk_reqsk_queue_drop(sk, req, prev);
489 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
490 goto out;
491
492 case TCP_SYN_SENT:
493 case TCP_SYN_RECV:
494 /* Only in fast or simultaneous open. If a fast open socket is
495 * already accepted it is treated as a connected one below.
496 */
497 if (fastopen && fastopen->sk == NULL)
498 break;
499
500 if (!sock_owned_by_user(sk)) {
501 sk->sk_err = err;
502
503 sk->sk_error_report(sk);
504
505 tcp_done(sk);
506 } else {
507 sk->sk_err_soft = err;
508 }
509 goto out;
510 }
511
512 /* If we've already connected we will keep trying
513 * until we time out, or the user gives up.
514 *
515 * rfc1122 4.2.3.9 allows us to consider as hard errors
516 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
517 * but it is obsoleted by pmtu discovery).
518 *
519 * Note that in the modern internet, where routing is unreliable
520 * and broken firewalls sit in every dark corner, sending random
521 * errors ordered by their masters, even these two messages finally lose
522 * their original sense (even Linux sends invalid PORT_UNREACHs)
523 *
524 * Now we are in compliance with RFCs.
525 * --ANK (980905)
526 */
527
528 inet = inet_sk(sk);
529 if (!sock_owned_by_user(sk) && inet->recverr) {
530 sk->sk_err = err;
531 sk->sk_error_report(sk);
532 } else { /* Only an error on timeout */
533 sk->sk_err_soft = err;
534 }
535
536 out:
537 bh_unlock_sock(sk);
538 sock_put(sk);
539 }
540
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 struct tcphdr *th = tcp_hdr(skb);
544
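/* With checksum offload (CHECKSUM_PARTIAL) only the pseudo-header sum is
 * stored and the device completes the checksum; otherwise compute the
 * full TCP checksum in software here.
 */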
545 if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 skb->csum_start = skb_transport_header(skb) - skb->head;
548 skb->csum_offset = offsetof(struct tcphdr, check);
549 } else {
550 th->check = tcp_v4_check(skb->len, saddr, daddr,
551 csum_partial(th,
552 th->doff << 2,
553 skb->csum));
554 }
555 }
556
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 const struct inet_sock *inet = inet_sk(sk);
561
562 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565
566 /*
567 * This routine will send an RST to the other tcp.
568 *
569 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570 * for the reset?
571 * Answer: if a packet caused a RST, it is not for a socket
572 * existing in our system; if it is matched to a socket,
573 * it is just a duplicate segment or a bug in the other side's TCP.
574 * So we build the reply based only on the parameters
575 * that arrived with the segment.
576 * Exception: precedence violation. We do not implement it in any case.
577 */
578
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581 const struct tcphdr *th = tcp_hdr(skb);
582 struct {
583 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 } rep;
588 struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 struct tcp_md5sig_key *key;
591 const __u8 *hash_location = NULL;
592 unsigned char newhash[16];
593 int genhash;
594 struct sock *sk1 = NULL;
595 #endif
596 struct net *net;
597
598 /* Never send a reset in response to a reset. */
599 if (th->rst)
600 return;
601
602 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
603 return;
604
605 /* Swap the send and the receive. */
606 memset(&rep, 0, sizeof(rep));
607 rep.th.dest = th->source;
608 rep.th.source = th->dest;
609 rep.th.doff = sizeof(struct tcphdr) / 4;
610 rep.th.rst = 1;
611
612 if (th->ack) {
613 rep.th.seq = th->ack_seq;
614 } else {
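/* The offending segment carried no ACK: acknowledge everything it sent,
 * counting SYN and FIN as one sequence number each.
 */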
615 rep.th.ack = 1;
616 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
617 skb->len - (th->doff << 2));
618 }
619
620 memset(&arg, 0, sizeof(arg));
621 arg.iov[0].iov_base = (unsigned char *)&rep;
622 arg.iov[0].iov_len = sizeof(rep.th);
623
624 #ifdef CONFIG_TCP_MD5SIG
625 hash_location = tcp_parse_md5sig_option(th);
626 if (!sk && hash_location) {
627 /*
628 * The active side is gone. Try to find the listening socket via the
629 * source port, and then find the md5 key via that listening socket.
630 * We do not lose any security here:
631 * the incoming packet is checked against the md5 hash of the key we find,
632 * and no RST is generated if the md5 hash doesn't match.
633 */
634 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
635 &tcp_hashinfo, ip_hdr(skb)->saddr,
636 th->source, ip_hdr(skb)->daddr,
637 ntohs(th->source), inet_iif(skb));
638 /* don't send rst if it can't find key */
639 if (!sk1)
640 return;
641 rcu_read_lock();
642 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
643 &ip_hdr(skb)->saddr, AF_INET);
644 if (!key)
645 goto release_sk1;
646
647 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
648 if (genhash || memcmp(hash_location, newhash, 16) != 0)
649 goto release_sk1;
650 } else {
651 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
652 &ip_hdr(skb)->saddr,
653 AF_INET) : NULL;
654 }
655
656 if (key) {
657 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
658 (TCPOPT_NOP << 16) |
659 (TCPOPT_MD5SIG << 8) |
660 TCPOLEN_MD5SIG);
661 /* Update length and the length the header thinks exists */
662 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
663 rep.th.doff = arg.iov[0].iov_len / 4;
664
665 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
666 key, ip_hdr(skb)->saddr,
667 ip_hdr(skb)->daddr, &rep.th);
668 }
669 #endif
670 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
671 ip_hdr(skb)->saddr, /* XXX */
672 arg.iov[0].iov_len, IPPROTO_TCP, 0);
673 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
674 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
675 /* When socket is gone, all binding information is lost.
676 * Routing might fail in this case. No choice here: if we force the
677 * input interface, we will misroute in case of an asymmetric route.
678 */
679 if (sk)
680 arg.bound_dev_if = sk->sk_bound_dev_if;
681
682 net = dev_net(skb_dst(skb)->dev);
683 arg.tos = ip_hdr(skb)->tos;
684 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
685 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
686 &arg, arg.iov[0].iov_len);
687
688 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
689 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
690
691 #ifdef CONFIG_TCP_MD5SIG
692 release_sk1:
693 if (sk1) {
694 rcu_read_unlock();
695 sock_put(sk1);
696 }
697 #endif
698 }
699
700 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
701 outside socket context, is certainly ugly. What can I do?
702 */
703
704 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
705 u32 win, u32 tsval, u32 tsecr, int oif,
706 struct tcp_md5sig_key *key,
707 int reply_flags, u8 tos)
708 {
709 const struct tcphdr *th = tcp_hdr(skb);
710 struct {
711 struct tcphdr th;
712 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
713 #ifdef CONFIG_TCP_MD5SIG
714 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
715 #endif
716 ];
717 } rep;
718 struct ip_reply_arg arg;
719 struct net *net = dev_net(skb_dst(skb)->dev);
720
721 memset(&rep.th, 0, sizeof(struct tcphdr));
722 memset(&arg, 0, sizeof(arg));
723
724 arg.iov[0].iov_base = (unsigned char *)&rep;
725 arg.iov[0].iov_len = sizeof(rep.th);
726 if (tsecr) {
727 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
728 (TCPOPT_TIMESTAMP << 8) |
729 TCPOLEN_TIMESTAMP);
730 rep.opt[1] = htonl(tsval);
731 rep.opt[2] = htonl(tsecr);
732 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
733 }
734
735 /* Swap the send and the receive. */
736 rep.th.dest = th->source;
737 rep.th.source = th->dest;
738 rep.th.doff = arg.iov[0].iov_len / 4;
739 rep.th.seq = htonl(seq);
740 rep.th.ack_seq = htonl(ack);
741 rep.th.ack = 1;
742 rep.th.window = htons(win);
743
744 #ifdef CONFIG_TCP_MD5SIG
745 if (key) {
746 int offset = (tsecr) ? 3 : 0;
747
748 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
749 (TCPOPT_NOP << 16) |
750 (TCPOPT_MD5SIG << 8) |
751 TCPOLEN_MD5SIG);
752 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
753 rep.th.doff = arg.iov[0].iov_len/4;
754
755 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
756 key, ip_hdr(skb)->saddr,
757 ip_hdr(skb)->daddr, &rep.th);
758 }
759 #endif
760 arg.flags = reply_flags;
761 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
762 ip_hdr(skb)->saddr, /* XXX */
763 arg.iov[0].iov_len, IPPROTO_TCP, 0);
764 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
765 if (oif)
766 arg.bound_dev_if = oif;
767 arg.tos = tos;
768 ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
769 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
770 &arg, arg.iov[0].iov_len);
771
772 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
773 }
774
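/* Answer a segment that hit a TIME-WAIT socket with an ACK built from
 * the saved timewait state.
 */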
775 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
776 {
777 struct inet_timewait_sock *tw = inet_twsk(sk);
778 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
779
780 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
781 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
782 tcp_time_stamp + tcptw->tw_ts_offset,
783 tcptw->tw_ts_recent,
784 tw->tw_bound_dev_if,
785 tcp_twsk_md5_key(tcptw),
786 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
787 tw->tw_tos
788 );
789
790 inet_twsk_put(tw);
791 }
792
793 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
794 struct request_sock *req)
795 {
796 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
797 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
798 */
799 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
800 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
801 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
802 tcp_time_stamp,
803 req->ts_recent,
804 0,
805 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
806 AF_INET),
807 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
808 ip_hdr(skb)->tos);
809 }
810
811 /*
812 * Send a SYN-ACK after having received a SYN.
813 * This still operates on a request_sock only, not on a big
814 * socket.
815 */
816 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
817 struct flowi *fl,
818 struct request_sock *req,
819 u16 queue_mapping,
820 struct tcp_fastopen_cookie *foc)
821 {
822 const struct inet_request_sock *ireq = inet_rsk(req);
823 struct flowi4 fl4;
824 int err = -1;
825 struct sk_buff *skb;
826
827 /* First, grab a route. */
828 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
829 return -1;
830
831 skb = tcp_make_synack(sk, dst, req, foc);
832
833 if (skb) {
834 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
835
836 skb_set_queue_mapping(skb, queue_mapping);
837 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
838 ireq->ir_rmt_addr,
839 ireq->opt);
840 err = net_xmit_eval(err);
841 }
842
843 return err;
844 }
845
846 /*
847 * IPv4 request_sock destructor.
848 */
849 static void tcp_v4_reqsk_destructor(struct request_sock *req)
850 {
851 kfree(inet_rsk(req)->opt);
852 }
853
854 /*
855 * Return true if a syncookie should be sent
856 */
857 bool tcp_syn_flood_action(struct sock *sk,
858 const struct sk_buff *skb,
859 const char *proto)
860 {
861 const char *msg = "Dropping request";
862 bool want_cookie = false;
863 struct listen_sock *lopt;
864
865 #ifdef CONFIG_SYN_COOKIES
866 if (sysctl_tcp_syncookies) {
867 msg = "Sending cookies";
868 want_cookie = true;
869 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
870 } else
871 #endif
872 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
873
874 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
875 if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
876 lopt->synflood_warned = 1;
877 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
878 proto, ntohs(tcp_hdr(skb)->dest), msg);
879 }
880 return want_cookie;
881 }
882 EXPORT_SYMBOL(tcp_syn_flood_action);
883
884 /*
885 * Save and compile IPv4 options into the request_sock if needed.
886 */
887 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
888 {
889 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
890 struct ip_options_rcu *dopt = NULL;
891
892 if (opt && opt->optlen) {
893 int opt_size = sizeof(*dopt) + opt->optlen;
894
895 dopt = kmalloc(opt_size, GFP_ATOMIC);
896 if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
897 kfree(dopt);
898 dopt = NULL;
899 }
900 }
901 return dopt;
902 }
903
904 #ifdef CONFIG_TCP_MD5SIG
905 /*
906 * RFC2385 MD5 checksumming requires a mapping of
907 * IP address->MD5 Key.
908 * We need to maintain these in the sk structure.
909 */
910
911 /* Find the Key structure for an address. */
912 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
913 const union tcp_md5_addr *addr,
914 int family)
915 {
916 struct tcp_sock *tp = tcp_sk(sk);
917 struct tcp_md5sig_key *key;
918 unsigned int size = sizeof(struct in_addr);
919 struct tcp_md5sig_info *md5sig;
920
921 /* caller either holds rcu_read_lock() or socket lock */
922 md5sig = rcu_dereference_check(tp->md5sig_info,
923 sock_owned_by_user(sk) ||
924 lockdep_is_held(&sk->sk_lock.slock));
925 if (!md5sig)
926 return NULL;
927 #if IS_ENABLED(CONFIG_IPV6)
928 if (family == AF_INET6)
929 size = sizeof(struct in6_addr);
930 #endif
931 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
932 if (key->family != family)
933 continue;
934 if (!memcmp(&key->addr, addr, size))
935 return key;
936 }
937 return NULL;
938 }
939 EXPORT_SYMBOL(tcp_md5_do_lookup);
940
941 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
942 struct sock *addr_sk)
943 {
944 union tcp_md5_addr *addr;
945
946 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
947 return tcp_md5_do_lookup(sk, addr, AF_INET);
948 }
949 EXPORT_SYMBOL(tcp_v4_md5_lookup);
950
951 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
952 struct request_sock *req)
953 {
954 union tcp_md5_addr *addr;
955
956 addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
957 return tcp_md5_do_lookup(sk, addr, AF_INET);
958 }
959
960 /* This can be called on a newly created socket, from other files */
961 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
962 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
963 {
964 /* Add Key to the list */
965 struct tcp_md5sig_key *key;
966 struct tcp_sock *tp = tcp_sk(sk);
967 struct tcp_md5sig_info *md5sig;
968
969 key = tcp_md5_do_lookup(sk, addr, family);
970 if (key) {
971 /* Pre-existing entry - just update that one. */
972 memcpy(key->key, newkey, newkeylen);
973 key->keylen = newkeylen;
974 return 0;
975 }
976
977 md5sig = rcu_dereference_protected(tp->md5sig_info,
978 sock_owned_by_user(sk));
979 if (!md5sig) {
980 md5sig = kmalloc(sizeof(*md5sig), gfp);
981 if (!md5sig)
982 return -ENOMEM;
983
984 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
985 INIT_HLIST_HEAD(&md5sig->head);
986 rcu_assign_pointer(tp->md5sig_info, md5sig);
987 }
988
989 key = sock_kmalloc(sk, sizeof(*key), gfp);
990 if (!key)
991 return -ENOMEM;
992 if (!tcp_alloc_md5sig_pool()) {
993 sock_kfree_s(sk, key, sizeof(*key));
994 return -ENOMEM;
995 }
996
997 memcpy(key->key, newkey, newkeylen);
998 key->keylen = newkeylen;
999 key->family = family;
1000 memcpy(&key->addr, addr,
1001 (family == AF_INET6) ? sizeof(struct in6_addr) :
1002 sizeof(struct in_addr));
1003 hlist_add_head_rcu(&key->node, &md5sig->head);
1004 return 0;
1005 }
1006 EXPORT_SYMBOL(tcp_md5_do_add);
1007
1008 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1009 {
1010 struct tcp_md5sig_key *key;
1011
1012 key = tcp_md5_do_lookup(sk, addr, family);
1013 if (!key)
1014 return -ENOENT;
1015 hlist_del_rcu(&key->node);
1016 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1017 kfree_rcu(key, rcu);
1018 return 0;
1019 }
1020 EXPORT_SYMBOL(tcp_md5_do_del);
1021
1022 static void tcp_clear_md5_list(struct sock *sk)
1023 {
1024 struct tcp_sock *tp = tcp_sk(sk);
1025 struct tcp_md5sig_key *key;
1026 struct hlist_node *n;
1027 struct tcp_md5sig_info *md5sig;
1028
1029 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1030
1031 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1032 hlist_del_rcu(&key->node);
1033 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1034 kfree_rcu(key, rcu);
1035 }
1036 }
1037
1038 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1039 int optlen)
1040 {
1041 struct tcp_md5sig cmd;
1042 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1043
1044 if (optlen < sizeof(cmd))
1045 return -EINVAL;
1046
1047 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1048 return -EFAULT;
1049
1050 if (sin->sin_family != AF_INET)
1051 return -EINVAL;
1052
1053 if (!cmd.tcpm_keylen)
1054 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1055 AF_INET);
1056
1057 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1058 return -EINVAL;
1059
1060 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1061 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1062 GFP_KERNEL);
1063 }
1064
1065 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1066 __be32 daddr, __be32 saddr, int nbytes)
1067 {
1068 struct tcp4_pseudohdr *bp;
1069 struct scatterlist sg;
1070
1071 bp = &hp->md5_blk.ip4;
1072
1073 /*
1074 * 1. the TCP pseudo-header (in the order: source IP address,
1075 * destination IP address, zero-padded protocol number, and
1076 * segment length)
1077 */
1078 bp->saddr = saddr;
1079 bp->daddr = daddr;
1080 bp->pad = 0;
1081 bp->protocol = IPPROTO_TCP;
1082 bp->len = cpu_to_be16(nbytes);
1083
1084 sg_init_one(&sg, bp, sizeof(*bp));
1085 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1086 }
1087
1088 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1089 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1090 {
1091 struct tcp_md5sig_pool *hp;
1092 struct hash_desc *desc;
1093
1094 hp = tcp_get_md5sig_pool();
1095 if (!hp)
1096 goto clear_hash_noput;
1097 desc = &hp->md5_desc;
1098
1099 if (crypto_hash_init(desc))
1100 goto clear_hash;
1101 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1102 goto clear_hash;
1103 if (tcp_md5_hash_header(hp, th))
1104 goto clear_hash;
1105 if (tcp_md5_hash_key(hp, key))
1106 goto clear_hash;
1107 if (crypto_hash_final(desc, md5_hash))
1108 goto clear_hash;
1109
1110 tcp_put_md5sig_pool();
1111 return 0;
1112
1113 clear_hash:
1114 tcp_put_md5sig_pool();
1115 clear_hash_noput:
1116 memset(md5_hash, 0, 16);
1117 return 1;
1118 }
1119
1120 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1121 const struct sock *sk, const struct request_sock *req,
1122 const struct sk_buff *skb)
1123 {
1124 struct tcp_md5sig_pool *hp;
1125 struct hash_desc *desc;
1126 const struct tcphdr *th = tcp_hdr(skb);
1127 __be32 saddr, daddr;
1128
1129 if (sk) {
1130 saddr = inet_sk(sk)->inet_saddr;
1131 daddr = inet_sk(sk)->inet_daddr;
1132 } else if (req) {
1133 saddr = inet_rsk(req)->ir_loc_addr;
1134 daddr = inet_rsk(req)->ir_rmt_addr;
1135 } else {
1136 const struct iphdr *iph = ip_hdr(skb);
1137 saddr = iph->saddr;
1138 daddr = iph->daddr;
1139 }
1140
1141 hp = tcp_get_md5sig_pool();
1142 if (!hp)
1143 goto clear_hash_noput;
1144 desc = &hp->md5_desc;
1145
1146 if (crypto_hash_init(desc))
1147 goto clear_hash;
1148
1149 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1150 goto clear_hash;
1151 if (tcp_md5_hash_header(hp, th))
1152 goto clear_hash;
1153 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1154 goto clear_hash;
1155 if (tcp_md5_hash_key(hp, key))
1156 goto clear_hash;
1157 if (crypto_hash_final(desc, md5_hash))
1158 goto clear_hash;
1159
1160 tcp_put_md5sig_pool();
1161 return 0;
1162
1163 clear_hash:
1164 tcp_put_md5sig_pool();
1165 clear_hash_noput:
1166 memset(md5_hash, 0, 16);
1167 return 1;
1168 }
1169 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1170
1171 static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
1172 const struct sk_buff *skb)
1173 {
1174 /*
1175 * This gets called for each TCP segment that arrives
1176 * so we want to be efficient.
1177 * We have 3 drop cases:
1178 * o No MD5 hash and one expected.
1179 * o MD5 hash and we're not expecting one.
1180 * o MD5 hash and it's wrong.
1181 */
1182 const __u8 *hash_location = NULL;
1183 struct tcp_md5sig_key *hash_expected;
1184 const struct iphdr *iph = ip_hdr(skb);
1185 const struct tcphdr *th = tcp_hdr(skb);
1186 int genhash;
1187 unsigned char newhash[16];
1188
1189 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1190 AF_INET);
1191 hash_location = tcp_parse_md5sig_option(th);
1192
1193 /* We've parsed the options - do we have a hash? */
1194 if (!hash_expected && !hash_location)
1195 return false;
1196
1197 if (hash_expected && !hash_location) {
1198 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1199 return true;
1200 }
1201
1202 if (!hash_expected && hash_location) {
1203 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1204 return true;
1205 }
1206
1207 /* Okay, so this is hash_expected and hash_location -
1208 * so we need to calculate the checksum.
1209 */
1210 genhash = tcp_v4_md5_hash_skb(newhash,
1211 hash_expected,
1212 NULL, NULL, skb);
1213
1214 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1215 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1216 &iph->saddr, ntohs(th->source),
1217 &iph->daddr, ntohs(th->dest),
1218 genhash ? " tcp_v4_calc_md5_hash failed"
1219 : "");
1220 return true;
1221 }
1222 return false;
1223 }
1224
1225 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1226 {
1227 bool ret;
1228
1229 rcu_read_lock();
1230 ret = __tcp_v4_inbound_md5_hash(sk, skb);
1231 rcu_read_unlock();
1232
1233 return ret;
1234 }
1235
1236 #endif
1237
1238 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
1239 struct sk_buff *skb)
1240 {
1241 struct inet_request_sock *ireq = inet_rsk(req);
1242
1243 ireq->ir_loc_addr = ip_hdr(skb)->daddr;
1244 ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
1245 ireq->no_srccheck = inet_sk(sk)->transparent;
1246 ireq->opt = tcp_v4_save_options(skb);
1247 }
1248
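/* Route the SYN-ACK for this request. When *strict is requested, report
 * whether the chosen route still points at the peer address of the request.
 */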
1249 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1250 const struct request_sock *req,
1251 bool *strict)
1252 {
1253 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1254
1255 if (strict) {
1256 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1257 *strict = true;
1258 else
1259 *strict = false;
1260 }
1261
1262 return dst;
1263 }
1264
1265 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1266 .family = PF_INET,
1267 .obj_size = sizeof(struct tcp_request_sock),
1268 .rtx_syn_ack = tcp_rtx_synack,
1269 .send_ack = tcp_v4_reqsk_send_ack,
1270 .destructor = tcp_v4_reqsk_destructor,
1271 .send_reset = tcp_v4_send_reset,
1272 .syn_ack_timeout = tcp_syn_ack_timeout,
1273 };
1274
1275 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1276 .mss_clamp = TCP_MSS_DEFAULT,
1277 #ifdef CONFIG_TCP_MD5SIG
1278 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1279 .calc_md5_hash = tcp_v4_md5_hash_skb,
1280 #endif
1281 .init_req = tcp_v4_init_req,
1282 #ifdef CONFIG_SYN_COOKIES
1283 .cookie_init_seq = cookie_v4_init_sequence,
1284 #endif
1285 .route_req = tcp_v4_route_req,
1286 .init_seq = tcp_v4_init_sequence,
1287 .send_synack = tcp_v4_send_synack,
1288 .queue_hash_add = inet_csk_reqsk_queue_hash_add,
1289 };
1290
1291 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1292 {
1293 /* Never answer SYNs sent to broadcast or multicast */
1294 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1295 goto drop;
1296
1297 return tcp_conn_request(&tcp_request_sock_ops,
1298 &tcp_request_sock_ipv4_ops, sk, skb);
1299
1300 drop:
1301 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1302 return 0;
1303 }
1304 EXPORT_SYMBOL(tcp_v4_conn_request);
1305
1306
1307 /*
1308 * The three way handshake has completed - we got a valid synack -
1309 * now create the new socket.
1310 */
1311 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1312 struct request_sock *req,
1313 struct dst_entry *dst)
1314 {
1315 struct inet_request_sock *ireq;
1316 struct inet_sock *newinet;
1317 struct tcp_sock *newtp;
1318 struct sock *newsk;
1319 #ifdef CONFIG_TCP_MD5SIG
1320 struct tcp_md5sig_key *key;
1321 #endif
1322 struct ip_options_rcu *inet_opt;
1323
1324 if (sk_acceptq_is_full(sk))
1325 goto exit_overflow;
1326
1327 newsk = tcp_create_openreq_child(sk, req, skb);
1328 if (!newsk)
1329 goto exit_nonewsk;
1330
1331 newsk->sk_gso_type = SKB_GSO_TCPV4;
1332 inet_sk_rx_dst_set(newsk, skb);
1333
1334 newtp = tcp_sk(newsk);
1335 newinet = inet_sk(newsk);
1336 ireq = inet_rsk(req);
1337 newinet->inet_daddr = ireq->ir_rmt_addr;
1338 newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1339 newinet->inet_saddr = ireq->ir_loc_addr;
1340 inet_opt = ireq->opt;
1341 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1342 ireq->opt = NULL;
1343 newinet->mc_index = inet_iif(skb);
1344 newinet->mc_ttl = ip_hdr(skb)->ttl;
1345 newinet->rcv_tos = ip_hdr(skb)->tos;
1346 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1347 inet_set_txhash(newsk);
1348 if (inet_opt)
1349 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1350 newinet->inet_id = newtp->write_seq ^ jiffies;
1351
1352 if (!dst) {
1353 dst = inet_csk_route_child_sock(sk, newsk, req);
1354 if (!dst)
1355 goto put_and_exit;
1356 } else {
1357 /* syncookie case : see end of cookie_v4_check() */
1358 }
1359 sk_setup_caps(newsk, dst);
1360
1361 tcp_sync_mss(newsk, dst_mtu(dst));
1362 newtp->advmss = dst_metric_advmss(dst);
1363 if (tcp_sk(sk)->rx_opt.user_mss &&
1364 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1365 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1366
1367 tcp_initialize_rcv_mss(newsk);
1368
1369 #ifdef CONFIG_TCP_MD5SIG
1370 /* Copy over the MD5 key from the original socket */
1371 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1372 AF_INET);
1373 if (key != NULL) {
1374 /*
1375 * We're using one, so create a matching key
1376 * on the newsk structure. If we fail to get
1377 * memory, then we end up not copying the key
1378 * across. Shucks.
1379 */
1380 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1381 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1382 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1383 }
1384 #endif
1385
1386 if (__inet_inherit_port(sk, newsk) < 0)
1387 goto put_and_exit;
1388 __inet_hash_nolisten(newsk, NULL);
1389
1390 return newsk;
1391
1392 exit_overflow:
1393 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1394 exit_nonewsk:
1395 dst_release(dst);
1396 exit:
1397 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1398 return NULL;
1399 put_and_exit:
1400 inet_csk_prepare_forced_close(newsk);
1401 tcp_done(newsk);
1402 goto exit;
1403 }
1404 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1405
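/* For a listening socket, map an incoming segment either to a pending
 * request_sock, to an already established child socket, or (for non-SYN
 * segments, with syncookies) to a cookie-validated connection.
 */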
1406 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1407 {
1408 struct tcphdr *th = tcp_hdr(skb);
1409 const struct iphdr *iph = ip_hdr(skb);
1410 struct sock *nsk;
1411 struct request_sock **prev;
1412 /* Find possible connection requests. */
1413 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1414 iph->saddr, iph->daddr);
1415 if (req)
1416 return tcp_check_req(sk, skb, req, prev, false);
1417
1418 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1419 th->source, iph->daddr, th->dest, inet_iif(skb));
1420
1421 if (nsk) {
1422 if (nsk->sk_state != TCP_TIME_WAIT) {
1423 bh_lock_sock(nsk);
1424 return nsk;
1425 }
1426 inet_twsk_put(inet_twsk(nsk));
1427 return NULL;
1428 }
1429
1430 #ifdef CONFIG_SYN_COOKIES
1431 if (!th->syn)
1432 sk = cookie_v4_check(sk, skb, &TCP_SKB_CB(skb)->header.h4.opt);
1433 #endif
1434 return sk;
1435 }
1436
1437 /* The socket must have its spinlock held when we get
1438 * here.
1439 *
1440 * We have a potential double-lock case here, so even when
1441 * doing backlog processing we use the BH locking scheme.
1442 * This is because we cannot sleep with the original spinlock
1443 * held.
1444 */
1445 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1446 {
1447 struct sock *rsk;
1448
1449 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1450 struct dst_entry *dst = sk->sk_rx_dst;
1451
1452 sock_rps_save_rxhash(sk, skb);
1453 if (dst) {
1454 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1455 dst->ops->check(dst, 0) == NULL) {
1456 dst_release(dst);
1457 sk->sk_rx_dst = NULL;
1458 }
1459 }
1460 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1461 return 0;
1462 }
1463
1464 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1465 goto csum_err;
1466
1467 if (sk->sk_state == TCP_LISTEN) {
1468 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1469 if (!nsk)
1470 goto discard;
1471
1472 if (nsk != sk) {
1473 sock_rps_save_rxhash(nsk, skb);
1474 if (tcp_child_process(sk, nsk, skb)) {
1475 rsk = nsk;
1476 goto reset;
1477 }
1478 return 0;
1479 }
1480 } else
1481 sock_rps_save_rxhash(sk, skb);
1482
1483 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1484 rsk = sk;
1485 goto reset;
1486 }
1487 return 0;
1488
1489 reset:
1490 tcp_v4_send_reset(rsk, skb);
1491 discard:
1492 kfree_skb(skb);
1493 /* Be careful here. If this function gets more complicated and
1494 * gcc suffers from register pressure on the x86, sk (in %ebx)
1495 * might be destroyed here. This current version compiles correctly,
1496 * but you have been warned.
1497 */
1498 return 0;
1499
1500 csum_err:
1501 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1502 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1503 goto discard;
1504 }
1505 EXPORT_SYMBOL(tcp_v4_do_rcv);
1506
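/* Early demux: look up an established socket while still in the IP layer,
 * so its cached input route can be reused for this packet.
 */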
1507 void tcp_v4_early_demux(struct sk_buff *skb)
1508 {
1509 const struct iphdr *iph;
1510 const struct tcphdr *th;
1511 struct sock *sk;
1512
1513 if (skb->pkt_type != PACKET_HOST)
1514 return;
1515
1516 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1517 return;
1518
1519 iph = ip_hdr(skb);
1520 th = tcp_hdr(skb);
1521
1522 if (th->doff < sizeof(struct tcphdr) / 4)
1523 return;
1524
1525 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1526 iph->saddr, th->source,
1527 iph->daddr, ntohs(th->dest),
1528 skb->skb_iif);
1529 if (sk) {
1530 skb->sk = sk;
1531 skb->destructor = sock_edemux;
1532 if (sk->sk_state != TCP_TIME_WAIT) {
1533 struct dst_entry *dst = sk->sk_rx_dst;
1534
1535 if (dst)
1536 dst = dst_check(dst, 0);
1537 if (dst &&
1538 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1539 skb_dst_set_noref(skb, dst);
1540 }
1541 }
1542 }
1543
1544 /* Packet is added to VJ-style prequeue for processing in process
1545 * context, if a reader task is waiting. Apparently, this exciting
1546 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1547 * failed somewhere. Latency? Burstiness? Well, at least now we will
1548 * see, why it failed. 8)8) --ANK
1549 *
1550 */
1551 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1552 {
1553 struct tcp_sock *tp = tcp_sk(sk);
1554
1555 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1556 return false;
1557
1558 if (skb->len <= tcp_hdrlen(skb) &&
1559 skb_queue_len(&tp->ucopy.prequeue) == 0)
1560 return false;
1561
1562 /* Before escaping RCU protected region, we need to take care of skb
1563 * dst. Prequeue is only enabled for established sockets.
1564 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1565 * Instead of doing a full sk_rx_dst validity check here, let's perform
1566 * an optimistic check.
1567 */
1568 if (likely(sk->sk_rx_dst))
1569 skb_dst_drop(skb);
1570 else
1571 skb_dst_force(skb);
1572
1573 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1574 tp->ucopy.memory += skb->truesize;
1575 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1576 struct sk_buff *skb1;
1577
1578 BUG_ON(sock_owned_by_user(sk));
1579
1580 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1581 sk_backlog_rcv(sk, skb1);
1582 NET_INC_STATS_BH(sock_net(sk),
1583 LINUX_MIB_TCPPREQUEUEDROPPED);
1584 }
1585
1586 tp->ucopy.memory = 0;
1587 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1588 wake_up_interruptible_sync_poll(sk_sleep(sk),
1589 POLLIN | POLLRDNORM | POLLRDBAND);
1590 if (!inet_csk_ack_scheduled(sk))
1591 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1592 (3 * tcp_rto_min(sk)) / 4,
1593 TCP_RTO_MAX);
1594 }
1595 return true;
1596 }
1597 EXPORT_SYMBOL(tcp_prequeue);
1598
1599 /*
1600 * From tcp_input.c
1601 */
1602
1603 int tcp_v4_rcv(struct sk_buff *skb)
1604 {
1605 const struct iphdr *iph;
1606 const struct tcphdr *th;
1607 struct sock *sk;
1608 int ret;
1609 struct net *net = dev_net(skb->dev);
1610
1611 if (skb->pkt_type != PACKET_HOST)
1612 goto discard_it;
1613
1614 /* Count it even if it's bad */
1615 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1616
1617 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1618 goto discard_it;
1619
1620 th = tcp_hdr(skb);
1621
1622 if (th->doff < sizeof(struct tcphdr) / 4)
1623 goto bad_packet;
1624 if (!pskb_may_pull(skb, th->doff * 4))
1625 goto discard_it;
1626
1627 /* An explanation is required here, I think.
1628 * Packet length and doff are validated by header prediction,
1629 * provided the case of th->doff == 0 is eliminated.
1630 * So, we defer the checks. */
1631
1632 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1633 goto csum_error;
1634
1635 th = tcp_hdr(skb);
1636 iph = ip_hdr(skb);
1637 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1638 * barrier() makes sure the compiler won't play fool^Waliasing games.
1639 */
1640 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1641 sizeof(struct inet_skb_parm));
1642 barrier();
1643
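/* Populate TCP_SKB_CB: end_seq counts the payload plus one sequence
 * number each for SYN and FIN.
 */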
1644 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1645 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1646 skb->len - th->doff * 4);
1647 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1648 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1649 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1650 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1651 TCP_SKB_CB(skb)->sacked = 0;
1652
1653 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1654 if (!sk)
1655 goto no_tcp_socket;
1656
1657 process:
1658 if (sk->sk_state == TCP_TIME_WAIT)
1659 goto do_time_wait;
1660
1661 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1662 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1663 goto discard_and_relse;
1664 }
1665
1666 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1667 goto discard_and_relse;
1668
1669 #ifdef CONFIG_TCP_MD5SIG
1670 /*
1671 * We really want to reject the packet as early as possible
1672 * if:
1673 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1674 * o There is an MD5 option and we're not expecting one
1675 */
1676 if (tcp_v4_inbound_md5_hash(sk, skb))
1677 goto discard_and_relse;
1678 #endif
1679
1680 nf_reset(skb);
1681
1682 if (sk_filter(sk, skb))
1683 goto discard_and_relse;
1684
1685 sk_mark_napi_id(sk, skb);
1686 skb->dev = NULL;
1687
1688 bh_lock_sock_nested(sk);
1689 ret = 0;
1690 if (!sock_owned_by_user(sk)) {
1691 #ifdef CONFIG_NET_DMA
1692 struct tcp_sock *tp = tcp_sk(sk);
1693 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1694 tp->ucopy.dma_chan = net_dma_find_channel();
1695 if (tp->ucopy.dma_chan)
1696 ret = tcp_v4_do_rcv(sk, skb);
1697 else
1698 #endif
1699 {
1700 if (!tcp_prequeue(sk, skb))
1701 ret = tcp_v4_do_rcv(sk, skb);
1702 }
1703 } else if (unlikely(sk_add_backlog(sk, skb,
1704 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1705 bh_unlock_sock(sk);
1706 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1707 goto discard_and_relse;
1708 }
1709 bh_unlock_sock(sk);
1710
1711 sock_put(sk);
1712
1713 return ret;
1714
1715 no_tcp_socket:
1716 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1717 goto discard_it;
1718
1719 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1720 csum_error:
1721 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1722 bad_packet:
1723 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1724 } else {
1725 tcp_v4_send_reset(NULL, skb);
1726 }
1727
1728 discard_it:
1729 /* Discard frame. */
1730 kfree_skb(skb);
1731 return 0;
1732
1733 discard_and_relse:
1734 sock_put(sk);
1735 goto discard_it;
1736
1737 do_time_wait:
1738 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1739 inet_twsk_put(inet_twsk(sk));
1740 goto discard_it;
1741 }
1742
1743 if (skb->len < (th->doff << 2)) {
1744 inet_twsk_put(inet_twsk(sk));
1745 goto bad_packet;
1746 }
1747 if (tcp_checksum_complete(skb)) {
1748 inet_twsk_put(inet_twsk(sk));
1749 goto csum_error;
1750 }
1751 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1752 case TCP_TW_SYN: {
1753 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1754 &tcp_hashinfo,
1755 iph->saddr, th->source,
1756 iph->daddr, th->dest,
1757 inet_iif(skb));
1758 if (sk2) {
1759 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1760 inet_twsk_put(inet_twsk(sk));
1761 sk = sk2;
1762 goto process;
1763 }
1764 /* Fall through to ACK */
1765 }
1766 case TCP_TW_ACK:
1767 tcp_v4_timewait_ack(sk, skb);
1768 break;
1769 case TCP_TW_RST:
1770 goto no_tcp_socket;
1771 case TCP_TW_SUCCESS:;
1772 }
1773 goto discard_it;
1774 }
1775
1776 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1777 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1778 .twsk_unique = tcp_twsk_unique,
1779 .twsk_destructor= tcp_twsk_destructor,
1780 };
1781
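/* Cache the input route and the ingress interface index on the socket so
 * later packets from this flow can reuse them.
 */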
1782 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1783 {
1784 struct dst_entry *dst = skb_dst(skb);
1785
1786 if (dst) {
1787 dst_hold(dst);
1788 sk->sk_rx_dst = dst;
1789 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1790 }
1791 }
1792 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1793
1794 const struct inet_connection_sock_af_ops ipv4_specific = {
1795 .queue_xmit = ip_queue_xmit,
1796 .send_check = tcp_v4_send_check,
1797 .rebuild_header = inet_sk_rebuild_header,
1798 .sk_rx_dst_set = inet_sk_rx_dst_set,
1799 .conn_request = tcp_v4_conn_request,
1800 .syn_recv_sock = tcp_v4_syn_recv_sock,
1801 .net_header_len = sizeof(struct iphdr),
1802 .setsockopt = ip_setsockopt,
1803 .getsockopt = ip_getsockopt,
1804 .addr2sockaddr = inet_csk_addr2sockaddr,
1805 .sockaddr_len = sizeof(struct sockaddr_in),
1806 .bind_conflict = inet_csk_bind_conflict,
1807 #ifdef CONFIG_COMPAT
1808 .compat_setsockopt = compat_ip_setsockopt,
1809 .compat_getsockopt = compat_ip_getsockopt,
1810 #endif
1811 .mtu_reduced = tcp_v4_mtu_reduced,
1812 };
1813 EXPORT_SYMBOL(ipv4_specific);
1814
1815 #ifdef CONFIG_TCP_MD5SIG
1816 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1817 .md5_lookup = tcp_v4_md5_lookup,
1818 .calc_md5_hash = tcp_v4_md5_hash_skb,
1819 .md5_parse = tcp_v4_parse_md5_keys,
1820 };
1821 #endif
1822
1823 /* NOTE: A lot of things set to zero explicitly by call to
1824 * sk_alloc() so need not be done here.
1825 */
1826 static int tcp_v4_init_sock(struct sock *sk)
1827 {
1828 struct inet_connection_sock *icsk = inet_csk(sk);
1829
1830 tcp_init_sock(sk);
1831
1832 icsk->icsk_af_ops = &ipv4_specific;
1833
1834 #ifdef CONFIG_TCP_MD5SIG
1835 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1836 #endif
1837
1838 return 0;
1839 }
1840
1841 void tcp_v4_destroy_sock(struct sock *sk)
1842 {
1843 struct tcp_sock *tp = tcp_sk(sk);
1844
1845 tcp_clear_xmit_timers(sk);
1846
1847 tcp_cleanup_congestion_control(sk);
1848
1849 /* Clean up the write buffer. */
1850 tcp_write_queue_purge(sk);
1851
1852 /* Cleans up our, hopefully empty, out_of_order_queue. */
1853 __skb_queue_purge(&tp->out_of_order_queue);
1854
1855 #ifdef CONFIG_TCP_MD5SIG
1856 /* Clean up the MD5 key list, if any */
1857 if (tp->md5sig_info) {
1858 tcp_clear_md5_list(sk);
1859 kfree_rcu(tp->md5sig_info, rcu);
1860 tp->md5sig_info = NULL;
1861 }
1862 #endif
1863
1864 #ifdef CONFIG_NET_DMA
1865 /* Cleans up our sk_async_wait_queue */
1866 __skb_queue_purge(&sk->sk_async_wait_queue);
1867 #endif
1868
1869 /* Clean prequeue, it must be empty really */
1870 __skb_queue_purge(&tp->ucopy.prequeue);
1871
1872 /* Clean up a referenced TCP bind bucket. */
1873 if (inet_csk(sk)->icsk_bind_hash)
1874 inet_put_port(sk);
1875
1876 BUG_ON(tp->fastopen_rsk != NULL);
1877
1878 /* If socket is aborted during connect operation */
1879 tcp_free_fastopen_req(tp);
1880
1881 sk_sockets_allocated_dec(sk);
1882 sock_release_memcg(sk);
1883 }
1884 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1885
1886 #ifdef CONFIG_PROC_FS
1887 /* Proc filesystem TCP sock list dumping. */
1888
1889 /*
1890 * Get the next listener socket following cur. If cur is NULL, get the first socket
1891 * starting from bucket given in st->bucket; when st->bucket is zero the
1892 * very first socket in the hash table is returned.
1893 */
1894 static void *listening_get_next(struct seq_file *seq, void *cur)
1895 {
1896 struct inet_connection_sock *icsk;
1897 struct hlist_nulls_node *node;
1898 struct sock *sk = cur;
1899 struct inet_listen_hashbucket *ilb;
1900 struct tcp_iter_state *st = seq->private;
1901 struct net *net = seq_file_net(seq);
1902
1903 if (!sk) {
1904 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1905 spin_lock_bh(&ilb->lock);
1906 sk = sk_nulls_head(&ilb->head);
1907 st->offset = 0;
1908 goto get_sk;
1909 }
1910 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1911 ++st->num;
1912 ++st->offset;
1913
1914 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1915 struct request_sock *req = cur;
1916
1917 icsk = inet_csk(st->syn_wait_sk);
1918 req = req->dl_next;
1919 while (1) {
1920 while (req) {
1921 if (req->rsk_ops->family == st->family) {
1922 cur = req;
1923 goto out;
1924 }
1925 req = req->dl_next;
1926 }
1927 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1928 break;
1929 get_req:
1930 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1931 }
1932 sk = sk_nulls_next(st->syn_wait_sk);
1933 st->state = TCP_SEQ_STATE_LISTENING;
1934 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1935 } else {
1936 icsk = inet_csk(sk);
1937 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1938 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1939 goto start_req;
1940 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1941 sk = sk_nulls_next(sk);
1942 }
1943 get_sk:
1944 sk_nulls_for_each_from(sk, node) {
1945 if (!net_eq(sock_net(sk), net))
1946 continue;
1947 if (sk->sk_family == st->family) {
1948 cur = sk;
1949 goto out;
1950 }
1951 icsk = inet_csk(sk);
1952 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1953 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1954 start_req:
1955 st->uid = sock_i_uid(sk);
1956 st->syn_wait_sk = sk;
1957 st->state = TCP_SEQ_STATE_OPENREQ;
1958 st->sbucket = 0;
1959 goto get_req;
1960 }
1961 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1962 }
1963 spin_unlock_bh(&ilb->lock);
1964 st->offset = 0;
1965 if (++st->bucket < INET_LHTABLE_SIZE) {
1966 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1967 spin_lock_bh(&ilb->lock);
1968 sk = sk_nulls_head(&ilb->head);
1969 goto get_sk;
1970 }
1971 cur = NULL;
1972 out:
1973 return cur;
1974 }
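
/*
 * Iteration note: listening_get_next() drives two sub-states of the walk.
 * In TCP_SEQ_STATE_LISTENING it scans the nulls chain of the current
 * listening_hash bucket under ilb->lock; when a listener has pending open
 * requests it switches to TCP_SEQ_STATE_OPENREQ and scans that listener's
 * SYN table under syn_wait_lock, recording the listener in st->syn_wait_sk
 * (and its uid in st->uid) so tcp_seq_stop() can release the proper locks
 * and get_openreq4() can attribute the entries later.
 */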
1975
1976 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1977 {
1978 struct tcp_iter_state *st = seq->private;
1979 void *rc;
1980
1981 st->bucket = 0;
1982 st->offset = 0;
1983 rc = listening_get_next(seq, NULL);
1984
1985 while (rc && *pos) {
1986 rc = listening_get_next(seq, rc);
1987 --*pos;
1988 }
1989 return rc;
1990 }
1991
1992 static inline bool empty_bucket(const struct tcp_iter_state *st)
1993 {
1994 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1995 }
1996
1997 /*
1998 * Get first established socket starting from bucket given in st->bucket.
1999 * If st->bucket is zero, the very first socket in the hash is returned.
2000 */
2001 static void *established_get_first(struct seq_file *seq)
2002 {
2003 struct tcp_iter_state *st = seq->private;
2004 struct net *net = seq_file_net(seq);
2005 void *rc = NULL;
2006
2007 st->offset = 0;
2008 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2009 struct sock *sk;
2010 struct hlist_nulls_node *node;
2011 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2012
2013 /* Lockless fast path for the common case of empty buckets */
2014 if (empty_bucket(st))
2015 continue;
2016
2017 spin_lock_bh(lock);
2018 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2019 if (sk->sk_family != st->family ||
2020 !net_eq(sock_net(sk), net)) {
2021 continue;
2022 }
2023 rc = sk;
2024 goto out;
2025 }
2026 spin_unlock_bh(lock);
2027 }
2028 out:
2029 return rc;
2030 }
2031
2032 static void *established_get_next(struct seq_file *seq, void *cur)
2033 {
2034 struct sock *sk = cur;
2035 struct hlist_nulls_node *node;
2036 struct tcp_iter_state *st = seq->private;
2037 struct net *net = seq_file_net(seq);
2038
2039 ++st->num;
2040 ++st->offset;
2041
2042 sk = sk_nulls_next(sk);
2043
2044 sk_nulls_for_each_from(sk, node) {
2045 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2046 return sk;
2047 }
2048
2049 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2050 ++st->bucket;
2051 return established_get_first(seq);
2052 }
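
/*
 * Locking convention for the established walk: established_get_first()
 * returns with the ehash bucket lock held whenever it yields a socket, and
 * established_get_next() keeps that lock while stepping within the bucket,
 * dropping it only when it advances to the next bucket. tcp_seq_stop()
 * releases whichever bucket lock is still held when the dump ends.
 */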
2053
2054 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2055 {
2056 struct tcp_iter_state *st = seq->private;
2057 void *rc;
2058
2059 st->bucket = 0;
2060 rc = established_get_first(seq);
2061
2062 while (rc && pos) {
2063 rc = established_get_next(seq, rc);
2064 --pos;
2065 }
2066 return rc;
2067 }
2068
2069 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2070 {
2071 void *rc;
2072 struct tcp_iter_state *st = seq->private;
2073
2074 st->state = TCP_SEQ_STATE_LISTENING;
2075 rc = listening_get_idx(seq, &pos);
2076
2077 if (!rc) {
2078 st->state = TCP_SEQ_STATE_ESTABLISHED;
2079 rc = established_get_idx(seq, pos);
2080 }
2081
2082 return rc;
2083 }
2084
2085 static void *tcp_seek_last_pos(struct seq_file *seq)
2086 {
2087 struct tcp_iter_state *st = seq->private;
2088 int offset = st->offset;
2089 int orig_num = st->num;
2090 void *rc = NULL;
2091
2092 switch (st->state) {
2093 case TCP_SEQ_STATE_OPENREQ:
2094 case TCP_SEQ_STATE_LISTENING:
2095 if (st->bucket >= INET_LHTABLE_SIZE)
2096 break;
2097 st->state = TCP_SEQ_STATE_LISTENING;
2098 rc = listening_get_next(seq, NULL);
2099 while (offset-- && rc)
2100 rc = listening_get_next(seq, rc);
2101 if (rc)
2102 break;
2103 st->bucket = 0;
2104 st->state = TCP_SEQ_STATE_ESTABLISHED;
2105 /* Fallthrough */
2106 case TCP_SEQ_STATE_ESTABLISHED:
2107 if (st->bucket > tcp_hashinfo.ehash_mask)
2108 break;
2109 rc = established_get_first(seq);
2110 while (offset-- && rc)
2111 rc = established_get_next(seq, rc);
2112 }
2113
2114 st->num = orig_num;
2115
2116 return rc;
2117 }
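
/*
 * tcp_seek_last_pos() lets a resumed read of the seq file continue from the
 * bucket and offset saved in st->bucket and st->offset instead of rescanning
 * every entry from the start. st->num is restored afterwards because the
 * get_next helpers increment it while re-walking to the saved offset.
 */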
2118
2119 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2120 {
2121 struct tcp_iter_state *st = seq->private;
2122 void *rc;
2123
2124 if (*pos && *pos == st->last_pos) {
2125 rc = tcp_seek_last_pos(seq);
2126 if (rc)
2127 goto out;
2128 }
2129
2130 st->state = TCP_SEQ_STATE_LISTENING;
2131 st->num = 0;
2132 st->bucket = 0;
2133 st->offset = 0;
2134 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2135
2136 out:
2137 st->last_pos = *pos;
2138 return rc;
2139 }
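
/*
 * A *pos of zero yields SEQ_START_TOKEN, which tcp4_seq_show() uses to emit
 * the column header line before any socket entries are printed.
 */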
2140
2141 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2142 {
2143 struct tcp_iter_state *st = seq->private;
2144 void *rc = NULL;
2145
2146 if (v == SEQ_START_TOKEN) {
2147 rc = tcp_get_idx(seq, 0);
2148 goto out;
2149 }
2150
2151 switch (st->state) {
2152 case TCP_SEQ_STATE_OPENREQ:
2153 case TCP_SEQ_STATE_LISTENING:
2154 rc = listening_get_next(seq, v);
2155 if (!rc) {
2156 st->state = TCP_SEQ_STATE_ESTABLISHED;
2157 st->bucket = 0;
2158 st->offset = 0;
2159 rc = established_get_first(seq);
2160 }
2161 break;
2162 case TCP_SEQ_STATE_ESTABLISHED:
2163 rc = established_get_next(seq, v);
2164 break;
2165 }
2166 out:
2167 ++*pos;
2168 st->last_pos = *pos;
2169 return rc;
2170 }
2171
2172 static void tcp_seq_stop(struct seq_file *seq, void *v)
2173 {
2174 struct tcp_iter_state *st = seq->private;
2175
2176 switch (st->state) {
2177 case TCP_SEQ_STATE_OPENREQ:
2178 if (v) {
2179 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2180 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2181 }
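		/* Fall through: the listening hash bucket lock is also held */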
2182 case TCP_SEQ_STATE_LISTENING:
2183 if (v != SEQ_START_TOKEN)
2184 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2185 break;
2186 case TCP_SEQ_STATE_ESTABLISHED:
2187 if (v)
2188 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2189 break;
2190 }
2191 }
2192
2193 int tcp_seq_open(struct inode *inode, struct file *file)
2194 {
2195 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2196 struct tcp_iter_state *s;
2197 int err;
2198
2199 err = seq_open_net(inode, file, &afinfo->seq_ops,
2200 sizeof(struct tcp_iter_state));
2201 if (err < 0)
2202 return err;
2203
2204 s = ((struct seq_file *)file->private_data)->private;
2205 s->family = afinfo->family;
2206 s->last_pos = 0;
2207 return 0;
2208 }
2209 EXPORT_SYMBOL(tcp_seq_open);
2210
2211 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2212 {
2213 int rc = 0;
2214 struct proc_dir_entry *p;
2215
2216 afinfo->seq_ops.start = tcp_seq_start;
2217 afinfo->seq_ops.next = tcp_seq_next;
2218 afinfo->seq_ops.stop = tcp_seq_stop;
2219
2220 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2221 afinfo->seq_fops, afinfo);
2222 if (!p)
2223 rc = -ENOMEM;
2224 return rc;
2225 }
2226 EXPORT_SYMBOL(tcp_proc_register);
2227
2228 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2229 {
2230 remove_proc_entry(afinfo->name, net->proc_net);
2231 }
2232 EXPORT_SYMBOL(tcp_proc_unregister);
2233
2234 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2235 struct seq_file *f, int i, kuid_t uid)
2236 {
2237 const struct inet_request_sock *ireq = inet_rsk(req);
2238 long delta = req->expires - jiffies;
2239
2240 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2241 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2242 i,
2243 ireq->ir_loc_addr,
2244 ntohs(inet_sk(sk)->inet_sport),
2245 ireq->ir_rmt_addr,
2246 ntohs(ireq->ir_rmt_port),
2247 TCP_SYN_RECV,
2248 0, 0, /* could print option size, but that is af dependent. */
2249 1, /* timers active (only the expire timer) */
2250 jiffies_delta_to_clock_t(delta),
2251 req->num_timeout,
2252 from_kuid_munged(seq_user_ns(f), uid),
2253 		0, /* non-standard timer */
2254 0, /* open_requests have no inode */
2255 atomic_read(&sk->sk_refcnt),
2256 req);
2257 }
2258
2259 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2260 {
2261 int timer_active;
2262 unsigned long timer_expires;
2263 const struct tcp_sock *tp = tcp_sk(sk);
2264 const struct inet_connection_sock *icsk = inet_csk(sk);
2265 const struct inet_sock *inet = inet_sk(sk);
2266 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2267 __be32 dest = inet->inet_daddr;
2268 __be32 src = inet->inet_rcv_saddr;
2269 __u16 destp = ntohs(inet->inet_dport);
2270 __u16 srcp = ntohs(inet->inet_sport);
2271 int rx_queue;
2272
2273 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2274 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2275 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2276 timer_active = 1;
2277 timer_expires = icsk->icsk_timeout;
2278 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2279 timer_active = 4;
2280 timer_expires = icsk->icsk_timeout;
2281 } else if (timer_pending(&sk->sk_timer)) {
2282 timer_active = 2;
2283 timer_expires = sk->sk_timer.expires;
2284 } else {
2285 timer_active = 0;
2286 timer_expires = jiffies;
2287 }
2288
2289 if (sk->sk_state == TCP_LISTEN)
2290 rx_queue = sk->sk_ack_backlog;
2291 else
2292 /*
2293 		 * Because we don't lock the socket, we might find a transient negative value.
2294 */
2295 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2296
2297 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2298 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2299 i, src, srcp, dest, destp, sk->sk_state,
2300 tp->write_seq - tp->snd_una,
2301 rx_queue,
2302 timer_active,
2303 jiffies_delta_to_clock_t(timer_expires - jiffies),
2304 icsk->icsk_retransmits,
2305 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2306 icsk->icsk_probes_out,
2307 sock_i_ino(sk),
2308 atomic_read(&sk->sk_refcnt), sk,
2309 jiffies_to_clock_t(icsk->icsk_rto),
2310 jiffies_to_clock_t(icsk->icsk_ack.ato),
2311 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2312 tp->snd_cwnd,
2313 sk->sk_state == TCP_LISTEN ?
2314 (fastopenq ? fastopenq->max_qlen : 0) :
2315 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2316 }
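
/*
 * The "tr" column emitted above encodes which timer is pending:
 * 1 - retransmit/early-retransmit/tail-loss-probe timer, 2 - keepalive
 * timer (sk_timer), 4 - zero window probe timer, 0 - no timer pending.
 * The adjacent "tm->when" column is that timer's remaining time converted
 * to clock ticks.
 */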
2317
2318 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2319 struct seq_file *f, int i)
2320 {
2321 __be32 dest, src;
2322 __u16 destp, srcp;
2323 s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2324
2325 dest = tw->tw_daddr;
2326 src = tw->tw_rcv_saddr;
2327 destp = ntohs(tw->tw_dport);
2328 srcp = ntohs(tw->tw_sport);
2329
2330 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2331 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2332 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2333 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2334 atomic_read(&tw->tw_refcnt), tw);
2335 }
2336
2337 #define TMPSZ 150
2338
2339 static int tcp4_seq_show(struct seq_file *seq, void *v)
2340 {
2341 struct tcp_iter_state *st;
2342 struct sock *sk = v;
2343
2344 seq_setwidth(seq, TMPSZ - 1);
2345 if (v == SEQ_START_TOKEN) {
2346 seq_puts(seq, " sl local_address rem_address st tx_queue "
2347 "rx_queue tr tm->when retrnsmt uid timeout "
2348 "inode");
2349 goto out;
2350 }
2351 st = seq->private;
2352
2353 switch (st->state) {
2354 case TCP_SEQ_STATE_LISTENING:
2355 case TCP_SEQ_STATE_ESTABLISHED:
2356 if (sk->sk_state == TCP_TIME_WAIT)
2357 get_timewait4_sock(v, seq, st->num);
2358 else
2359 get_tcp4_sock(v, seq, st->num);
2360 break;
2361 case TCP_SEQ_STATE_OPENREQ:
2362 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
2363 break;
2364 }
2365 out:
2366 seq_pad(seq, '\n');
2367 return 0;
2368 }
2369
2370 static const struct file_operations tcp_afinfo_seq_fops = {
2371 .owner = THIS_MODULE,
2372 .open = tcp_seq_open,
2373 .read = seq_read,
2374 .llseek = seq_lseek,
2375 .release = seq_release_net
2376 };
2377
2378 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2379 .name = "tcp",
2380 .family = AF_INET,
2381 .seq_fops = &tcp_afinfo_seq_fops,
2382 .seq_ops = {
2383 .show = tcp4_seq_show,
2384 },
2385 };
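
/*
 * tcp4_seq_afinfo backs /proc/net/tcp: tcp_proc_register() fills in the
 * start/next/stop seq_ops and creates a proc entry named after ->name in
 * each network namespace. Another address family can reuse the same
 * machinery with its own afinfo; a minimal sketch (hypothetical "tcpX"
 * name and show routine, for illustration only):
 *
 *	static struct tcp_seq_afinfo tcpX_seq_afinfo = {
 *		.name		= "tcpX",
 *		.family		= AF_INET,
 *		.seq_fops	= &tcp_afinfo_seq_fops,
 *		.seq_ops	= {
 *			.show	= tcpX_seq_show,
 *		},
 *	};
 *
 * followed by tcp_proc_register(net, &tcpX_seq_afinfo) from a pernet init
 * hook, mirroring tcp4_proc_init_net() below.
 */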
2386
2387 static int __net_init tcp4_proc_init_net(struct net *net)
2388 {
2389 return tcp_proc_register(net, &tcp4_seq_afinfo);
2390 }
2391
2392 static void __net_exit tcp4_proc_exit_net(struct net *net)
2393 {
2394 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2395 }
2396
2397 static struct pernet_operations tcp4_net_ops = {
2398 .init = tcp4_proc_init_net,
2399 .exit = tcp4_proc_exit_net,
2400 };
2401
2402 int __init tcp4_proc_init(void)
2403 {
2404 return register_pernet_subsys(&tcp4_net_ops);
2405 }
2406
2407 void tcp4_proc_exit(void)
2408 {
2409 unregister_pernet_subsys(&tcp4_net_ops);
2410 }
2411 #endif /* CONFIG_PROC_FS */
2412
2413 struct proto tcp_prot = {
2414 .name = "TCP",
2415 .owner = THIS_MODULE,
2416 .close = tcp_close,
2417 .connect = tcp_v4_connect,
2418 .disconnect = tcp_disconnect,
2419 .accept = inet_csk_accept,
2420 .ioctl = tcp_ioctl,
2421 .init = tcp_v4_init_sock,
2422 .destroy = tcp_v4_destroy_sock,
2423 .shutdown = tcp_shutdown,
2424 .setsockopt = tcp_setsockopt,
2425 .getsockopt = tcp_getsockopt,
2426 .recvmsg = tcp_recvmsg,
2427 .sendmsg = tcp_sendmsg,
2428 .sendpage = tcp_sendpage,
2429 .backlog_rcv = tcp_v4_do_rcv,
2430 .release_cb = tcp_release_cb,
2431 .hash = inet_hash,
2432 .unhash = inet_unhash,
2433 .get_port = inet_csk_get_port,
2434 .enter_memory_pressure = tcp_enter_memory_pressure,
2435 .stream_memory_free = tcp_stream_memory_free,
2436 .sockets_allocated = &tcp_sockets_allocated,
2437 .orphan_count = &tcp_orphan_count,
2438 .memory_allocated = &tcp_memory_allocated,
2439 .memory_pressure = &tcp_memory_pressure,
2440 .sysctl_mem = sysctl_tcp_mem,
2441 .sysctl_wmem = sysctl_tcp_wmem,
2442 .sysctl_rmem = sysctl_tcp_rmem,
2443 .max_header = MAX_TCP_HEADER,
2444 .obj_size = sizeof(struct tcp_sock),
2445 .slab_flags = SLAB_DESTROY_BY_RCU,
2446 .twsk_prot = &tcp_timewait_sock_ops,
2447 .rsk_prot = &tcp_request_sock_ops,
2448 .h.hashinfo = &tcp_hashinfo,
2449 .no_autobind = true,
2450 #ifdef CONFIG_COMPAT
2451 .compat_setsockopt = compat_tcp_setsockopt,
2452 .compat_getsockopt = compat_tcp_getsockopt,
2453 #endif
2454 #ifdef CONFIG_MEMCG_KMEM
2455 .init_cgroup = tcp_init_cgroup,
2456 .destroy_cgroup = tcp_destroy_cgroup,
2457 .proto_cgroup = tcp_proto_cgroup,
2458 #endif
2459 };
2460 EXPORT_SYMBOL(tcp_prot);
2461
2462 static int __net_init tcp_sk_init(struct net *net)
2463 {
2464 net->ipv4.sysctl_tcp_ecn = 2;
2465 return 0;
2466 }
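
/*
 * The per-netns default of sysctl_tcp_ecn = 2 means ECN is negotiated when
 * an incoming connection requests it, but is not requested on outgoing
 * connections (0 disables ECN entirely, 1 also requests it actively).
 */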
2467
2468 static void __net_exit tcp_sk_exit(struct net *net)
2469 {
2470 }
2471
2472 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2473 {
2474 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2475 }
2476
2477 static struct pernet_operations __net_initdata tcp_sk_ops = {
2478 .init = tcp_sk_init,
2479 .exit = tcp_sk_exit,
2480 .exit_batch = tcp_sk_exit_batch,
2481 };
2482
2483 void __init tcp_v4_init(void)
2484 {
2485 inet_hashinfo_init(&tcp_hashinfo);
2486 if (register_pernet_subsys(&tcp_sk_ops))
2487 panic("Failed to create the TCP control socket.\n");
2488 }