net/ipv4/tcp_ipv4.c
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24/*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53
54#include <linux/bottom_half.h>
55#include <linux/types.h>
56#include <linux/fcntl.h>
57#include <linux/module.h>
58#include <linux/random.h>
59#include <linux/cache.h>
60#include <linux/jhash.h>
61#include <linux/init.h>
62#include <linux/times.h>
63#include <linux/slab.h>
64
65#include <net/net_namespace.h>
66#include <net/icmp.h>
67#include <net/inet_hashtables.h>
68#include <net/tcp.h>
69#include <net/transp_v6.h>
70#include <net/ipv6.h>
71#include <net/inet_common.h>
72#include <net/timewait_sock.h>
73#include <net/xfrm.h>
74#include <net/netdma.h>
75
76#include <linux/inet.h>
77#include <linux/ipv6.h>
78#include <linux/stddef.h>
79#include <linux/proc_fs.h>
80#include <linux/seq_file.h>
81
82#include <linux/crypto.h>
83#include <linux/scatterlist.h>
84
85int sysctl_tcp_tw_reuse __read_mostly;
86int sysctl_tcp_low_latency __read_mostly;
87EXPORT_SYMBOL(sysctl_tcp_low_latency);
88
89
90#ifdef CONFIG_TCP_MD5SIG
91static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
92 __be32 addr);
93static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, struct tcphdr *th);
95#else
96static inline
97struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
98{
99 return NULL;
100}
101#endif
102
103struct inet_hashinfo tcp_hashinfo;
104EXPORT_SYMBOL(tcp_hashinfo);
105
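/* Pick the initial sequence number for an incoming connection request from
 * the packet's address/port 4-tuple via secure_tcp_sequence_number().
 */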
106static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
107{
108 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
109 ip_hdr(skb)->saddr,
110 tcp_hdr(skb)->dest,
111 tcp_hdr(skb)->source);
112}
113
114int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
115{
116 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
117 struct tcp_sock *tp = tcp_sk(sk);
118
119 /* With PAWS, it is safe from the viewpoint
120 of data integrity. Even without PAWS it is safe provided sequence
121 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
122
123 Actually, the idea is close to VJ's, only the timestamp cache is
124 held not per host but per port pair, and the TW bucket is used as
125 the state holder.
126
127 If the TW bucket has already been destroyed, we fall back to VJ's scheme
128 and use the initial timestamp retrieved from the peer table.
129 */
130 if (tcptw->tw_ts_recent_stamp &&
131 (twp == NULL || (sysctl_tcp_tw_reuse &&
132 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
133 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
134 if (tp->write_seq == 0)
135 tp->write_seq = 1;
136 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
137 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
138 sock_hold(sktw);
139 return 1;
140 }
141
142 return 0;
143}
144EXPORT_SYMBOL_GPL(tcp_twsk_unique);
145
146/* This will initiate an outgoing connection. */
147int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
148{
149 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
150 struct inet_sock *inet = inet_sk(sk);
151 struct tcp_sock *tp = tcp_sk(sk);
152 __be16 orig_sport, orig_dport;
153 __be32 daddr, nexthop;
154 struct flowi4 *fl4;
155 struct rtable *rt;
156 int err;
157 struct ip_options_rcu *inet_opt;
158
159 if (addr_len < sizeof(struct sockaddr_in))
160 return -EINVAL;
161
162 if (usin->sin_family != AF_INET)
163 return -EAFNOSUPPORT;
164
165 nexthop = daddr = usin->sin_addr.s_addr;
166 inet_opt = rcu_dereference_protected(inet->inet_opt,
167 sock_owned_by_user(sk));
168 if (inet_opt && inet_opt->opt.srr) {
169 if (!daddr)
170 return -EINVAL;
171 nexthop = inet_opt->opt.faddr;
172 }
173
174 orig_sport = inet->inet_sport;
175 orig_dport = usin->sin_port;
176 fl4 = &inet->cork.fl.u.ip4;
177 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
178 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
179 IPPROTO_TCP,
180 orig_sport, orig_dport, sk, true);
181 if (IS_ERR(rt)) {
182 err = PTR_ERR(rt);
183 if (err == -ENETUNREACH)
184 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
185 return err;
186 }
187
188 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
189 ip_rt_put(rt);
190 return -ENETUNREACH;
191 }
192
193 if (!inet_opt || !inet_opt->opt.srr)
194 daddr = fl4->daddr;
195
196 if (!inet->inet_saddr)
197 inet->inet_saddr = fl4->saddr;
198 inet->inet_rcv_saddr = inet->inet_saddr;
199
200 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
201 /* Reset inherited state */
202 tp->rx_opt.ts_recent = 0;
203 tp->rx_opt.ts_recent_stamp = 0;
204 tp->write_seq = 0;
205 }
206
207 if (tcp_death_row.sysctl_tw_recycle &&
208 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
209 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
210 /*
211 * VJ's idea. We save the last timestamp seen from
212 * the destination in the peer table when entering state
213 * TIME-WAIT, and initialize rx_opt.ts_recent from it
214 * when trying a new connection.
215 */
216 if (peer) {
217 inet_peer_refcheck(peer);
218 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
219 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
220 tp->rx_opt.ts_recent = peer->tcp_ts;
221 }
222 }
223 }
224
225 inet->inet_dport = usin->sin_port;
226 inet->inet_daddr = daddr;
227
228 inet_csk(sk)->icsk_ext_hdr_len = 0;
229 if (inet_opt)
230 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
231
232 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
233
234 /* Socket identity is still unknown (sport may be zero).
235 * However we set state to SYN-SENT and, without releasing the socket
236 * lock, select a source port, enter ourselves into the hash tables and
237 * complete initialization after this.
238 */
239 tcp_set_state(sk, TCP_SYN_SENT);
240 err = inet_hash_connect(&tcp_death_row, sk);
241 if (err)
242 goto failure;
243
244 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
245 inet->inet_sport, inet->inet_dport, sk);
246 if (IS_ERR(rt)) {
247 err = PTR_ERR(rt);
248 rt = NULL;
249 goto failure;
250 }
251 /* OK, now commit destination to socket. */
252 sk->sk_gso_type = SKB_GSO_TCPV4;
253 sk_setup_caps(sk, &rt->dst);
254
255 if (!tp->write_seq)
256 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
257 inet->inet_daddr,
258 inet->inet_sport,
259 usin->sin_port);
260
261 inet->inet_id = tp->write_seq ^ jiffies;
262
263 err = tcp_connect(sk);
264 rt = NULL;
265 if (err)
266 goto failure;
267
268 return 0;
269
270failure:
271 /*
272 * This unhashes the socket and releases the local port,
273 * if necessary.
274 */
275 tcp_set_state(sk, TCP_CLOSE);
276 ip_rt_put(rt);
277 sk->sk_route_caps = 0;
278 inet->inet_dport = 0;
279 return err;
280}
281EXPORT_SYMBOL(tcp_v4_connect);
282
283/*
284 * This routine does path mtu discovery as defined in RFC1191.
285 */
286static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
287{
288 struct dst_entry *dst;
289 struct inet_sock *inet = inet_sk(sk);
290
291 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
292 * sent out by Linux are always < 576 bytes, so they should go through
293 * unfragmented).
294 */
295 if (sk->sk_state == TCP_LISTEN)
296 return;
297
298 /* We don't check in the dst entry if pmtu discovery is forbidden
299 * on this route. We just assume that no packet-too-big packets
300 * are sent back when pmtu discovery is not active.
301 * There is a small race when the user changes this flag in the
302 * route, but I think that's acceptable.
303 */
304 if ((dst = __sk_dst_check(sk, 0)) == NULL)
305 return;
306
307 dst->ops->update_pmtu(dst, mtu);
308
309 /* Something is about to go wrong... Remember the soft error
310 * in case this connection will not be able to recover.
311 */
312 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
313 sk->sk_err_soft = EMSGSIZE;
314
315 mtu = dst_mtu(dst);
316
317 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
318 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
319 tcp_sync_mss(sk, mtu);
320
321 /* Resend the TCP packet because it's
322 * clear that the old packet has been
323 * dropped. This is the new "fast" path mtu
324 * discovery.
325 */
326 tcp_simple_retransmit(sk);
327 } /* else let the usual retransmit timer handle it */
328}
329
330/*
331 * This routine is called by the ICMP module when it gets some
332 * sort of error condition. If err < 0 then the socket should
333 * be closed and the error returned to the user. If err > 0
334 * it's just the icmp type << 8 | icmp code. After adjustment
335 * header points to the first 8 bytes of the tcp header. We need
336 * to find the appropriate port.
337 *
338 * The locking strategy used here is very "optimistic". When
339 * someone else accesses the socket the ICMP is just dropped
340 * and for some paths there is no check at all.
341 * A more general error queue to queue errors for later handling
342 * is probably better.
343 *
344 */
345
346void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
347{
348 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
349 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
350 struct inet_connection_sock *icsk;
351 struct tcp_sock *tp;
352 struct inet_sock *inet;
353 const int type = icmp_hdr(icmp_skb)->type;
354 const int code = icmp_hdr(icmp_skb)->code;
355 struct sock *sk;
356 struct sk_buff *skb;
357 __u32 seq;
358 __u32 remaining;
359 int err;
360 struct net *net = dev_net(icmp_skb->dev);
361
362 if (icmp_skb->len < (iph->ihl << 2) + 8) {
363 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
364 return;
365 }
366
367 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
368 iph->saddr, th->source, inet_iif(icmp_skb));
369 if (!sk) {
370 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
371 return;
372 }
373 if (sk->sk_state == TCP_TIME_WAIT) {
374 inet_twsk_put(inet_twsk(sk));
375 return;
376 }
377
378 bh_lock_sock(sk);
379 /* If too many ICMPs get dropped on busy
380 * servers this needs to be solved differently.
381 */
382 if (sock_owned_by_user(sk))
383 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
384
385 if (sk->sk_state == TCP_CLOSE)
386 goto out;
387
388 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
389 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
390 goto out;
391 }
392
393 icsk = inet_csk(sk);
394 tp = tcp_sk(sk);
395 seq = ntohl(th->seq);
396 if (sk->sk_state != TCP_LISTEN &&
397 !between(seq, tp->snd_una, tp->snd_nxt)) {
398 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
399 goto out;
400 }
401
402 switch (type) {
403 case ICMP_SOURCE_QUENCH:
404 /* Just silently ignore these. */
405 goto out;
406 case ICMP_PARAMETERPROB:
407 err = EPROTO;
408 break;
409 case ICMP_DEST_UNREACH:
410 if (code > NR_ICMP_UNREACH)
411 goto out;
412
413 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
414 if (!sock_owned_by_user(sk))
415 do_pmtu_discovery(sk, iph, info);
416 goto out;
417 }
418
419 err = icmp_err_convert[code].errno;
420 /* check if icmp_skb allows revert of backoff
421 * (see draft-zimmermann-tcp-lcd) */
422 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
423 break;
424 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
425 !icsk->icsk_backoff)
426 break;
427
428 if (sock_owned_by_user(sk))
429 break;
430
431 icsk->icsk_backoff--;
432 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
433 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
434 tcp_bound_rto(sk);
435
436 skb = tcp_write_queue_head(sk);
437 BUG_ON(!skb);
438
439 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
440 tcp_time_stamp - TCP_SKB_CB(skb)->when);
441
442 if (remaining) {
443 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
444 remaining, TCP_RTO_MAX);
445 } else {
446 /* RTO revert clocked out retransmission.
447 * Will retransmit now */
448 tcp_retransmit_timer(sk);
449 }
450
451 break;
452 case ICMP_TIME_EXCEEDED:
453 err = EHOSTUNREACH;
454 break;
455 default:
456 goto out;
457 }
458
459 switch (sk->sk_state) {
460 struct request_sock *req, **prev;
461 case TCP_LISTEN:
462 if (sock_owned_by_user(sk))
463 goto out;
464
465 req = inet_csk_search_req(sk, &prev, th->dest,
466 iph->daddr, iph->saddr);
467 if (!req)
468 goto out;
469
470 /* ICMPs are not backlogged, hence we cannot get
471 an established socket here.
472 */
473 WARN_ON(req->sk);
474
475 if (seq != tcp_rsk(req)->snt_isn) {
476 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
477 goto out;
478 }
479
480 /*
481 * Still in SYN_RECV, just remove it silently.
482 * There is no good way to pass the error to the newly
483 * created socket, and POSIX does not want network
484 * errors returned from accept().
485 */
486 inet_csk_reqsk_queue_drop(sk, req, prev);
487 goto out;
488
489 case TCP_SYN_SENT:
490 case TCP_SYN_RECV: /* Cannot happen.
491 It can happen, e.g., if SYNs crossed.
492 */
493 if (!sock_owned_by_user(sk)) {
494 sk->sk_err = err;
495
496 sk->sk_error_report(sk);
497
498 tcp_done(sk);
499 } else {
500 sk->sk_err_soft = err;
501 }
502 goto out;
503 }
504
505 /* If we've already connected we will keep trying
506 * until we time out, or the user gives up.
507 *
508 * rfc1122 4.2.3.9 allows us to consider as hard errors
509 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
510 * but it is obsoleted by pmtu discovery).
511 *
512 * Note that in the modern internet, where routing is unreliable
513 * and broken firewalls sit in every dark corner sending random
514 * errors ordered by their masters, even these two messages finally lose
515 * their original sense (even Linux sends invalid PORT_UNREACHs).
516 *
517 * Now we are in compliance with RFCs.
518 * --ANK (980905)
519 */
520
521 inet = inet_sk(sk);
522 if (!sock_owned_by_user(sk) && inet->recverr) {
523 sk->sk_err = err;
524 sk->sk_error_report(sk);
525 } else { /* Only an error on timeout */
526 sk->sk_err_soft = err;
527 }
528
529out:
530 bh_unlock_sock(sk);
531 sock_put(sk);
532}
533
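/* Compute the TCP checksum for an outgoing skb.  With CHECKSUM_PARTIAL only
 * the pseudo-header sum is filled in and the device finishes the checksum;
 * otherwise the complete checksum is calculated here in software.
 */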
534static void __tcp_v4_send_check(struct sk_buff *skb,
535 __be32 saddr, __be32 daddr)
536{
537 struct tcphdr *th = tcp_hdr(skb);
538
539 if (skb->ip_summed == CHECKSUM_PARTIAL) {
540 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
541 skb->csum_start = skb_transport_header(skb) - skb->head;
542 skb->csum_offset = offsetof(struct tcphdr, check);
543 } else {
544 th->check = tcp_v4_check(skb->len, saddr, daddr,
545 csum_partial(th,
546 th->doff << 2,
547 skb->csum));
548 }
549}
550
551/* This routine computes an IPv4 TCP checksum. */
552void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
553{
554 struct inet_sock *inet = inet_sk(sk);
555
556 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
557}
558EXPORT_SYMBOL(tcp_v4_send_check);
559
560int tcp_v4_gso_send_check(struct sk_buff *skb)
561{
562 const struct iphdr *iph;
563 struct tcphdr *th;
564
565 if (!pskb_may_pull(skb, sizeof(*th)))
566 return -EINVAL;
567
568 iph = ip_hdr(skb);
569 th = tcp_hdr(skb);
570
571 th->check = 0;
572 skb->ip_summed = CHECKSUM_PARTIAL;
573 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
574 return 0;
575}
576
577/*
578 * This routine will send an RST to the other tcp.
579 *
580 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
581 * for the reset?
582 * Answer: if a packet caused an RST, it is not for a socket
583 * existing in our system; if it is matched to a socket,
584 * it is just a duplicate segment or a bug in the other side's TCP.
585 * So we build the reply based only on the parameters
586 * that arrived with the segment.
587 * Exception: precedence violation. We do not implement it in any case.
588 */
589
590static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
591{
592 struct tcphdr *th = tcp_hdr(skb);
593 struct {
594 struct tcphdr th;
595#ifdef CONFIG_TCP_MD5SIG
596 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
597#endif
598 } rep;
599 struct ip_reply_arg arg;
600#ifdef CONFIG_TCP_MD5SIG
601 struct tcp_md5sig_key *key;
602#endif
603 struct net *net;
604
605 /* Never send a reset in response to a reset. */
606 if (th->rst)
607 return;
608
609 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
610 return;
611
612 /* Swap the send and the receive. */
613 memset(&rep, 0, sizeof(rep));
614 rep.th.dest = th->source;
615 rep.th.source = th->dest;
616 rep.th.doff = sizeof(struct tcphdr) / 4;
617 rep.th.rst = 1;
618
619 if (th->ack) {
620 rep.th.seq = th->ack_seq;
621 } else {
622 rep.th.ack = 1;
623 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
624 skb->len - (th->doff << 2));
625 }
626
627 memset(&arg, 0, sizeof(arg));
628 arg.iov[0].iov_base = (unsigned char *)&rep;
629 arg.iov[0].iov_len = sizeof(rep.th);
630
631#ifdef CONFIG_TCP_MD5SIG
632 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
633 if (key) {
634 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
635 (TCPOPT_NOP << 16) |
636 (TCPOPT_MD5SIG << 8) |
637 TCPOLEN_MD5SIG);
638 /* Update length and the length the header thinks exists */
639 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
640 rep.th.doff = arg.iov[0].iov_len / 4;
641
642 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
643 key, ip_hdr(skb)->saddr,
644 ip_hdr(skb)->daddr, &rep.th);
645 }
646#endif
647 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
648 ip_hdr(skb)->saddr, /* XXX */
649 arg.iov[0].iov_len, IPPROTO_TCP, 0);
650 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
651 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
652
653 net = dev_net(skb_dst(skb)->dev);
654 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
655 &arg, arg.iov[0].iov_len);
656
657 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
658 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
659}
660
661/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
662 outside socket context, is certainly ugly. What can I do?
663 */
664
665static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
666 u32 win, u32 ts, int oif,
667 struct tcp_md5sig_key *key,
668 int reply_flags)
669{
670 struct tcphdr *th = tcp_hdr(skb);
671 struct {
672 struct tcphdr th;
673 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
674#ifdef CONFIG_TCP_MD5SIG
675 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
676#endif
677 ];
678 } rep;
679 struct ip_reply_arg arg;
680 struct net *net = dev_net(skb_dst(skb)->dev);
681
682 memset(&rep.th, 0, sizeof(struct tcphdr));
683 memset(&arg, 0, sizeof(arg));
684
685 arg.iov[0].iov_base = (unsigned char *)&rep;
686 arg.iov[0].iov_len = sizeof(rep.th);
687 if (ts) {
688 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
689 (TCPOPT_TIMESTAMP << 8) |
690 TCPOLEN_TIMESTAMP);
691 rep.opt[1] = htonl(tcp_time_stamp);
692 rep.opt[2] = htonl(ts);
693 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
694 }
695
696 /* Swap the send and the receive. */
697 rep.th.dest = th->source;
698 rep.th.source = th->dest;
699 rep.th.doff = arg.iov[0].iov_len / 4;
700 rep.th.seq = htonl(seq);
701 rep.th.ack_seq = htonl(ack);
702 rep.th.ack = 1;
703 rep.th.window = htons(win);
704
705#ifdef CONFIG_TCP_MD5SIG
706 if (key) {
707 int offset = (ts) ? 3 : 0;
708
709 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
710 (TCPOPT_NOP << 16) |
711 (TCPOPT_MD5SIG << 8) |
712 TCPOLEN_MD5SIG);
713 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
714 rep.th.doff = arg.iov[0].iov_len/4;
715
716 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
717 key, ip_hdr(skb)->saddr,
718 ip_hdr(skb)->daddr, &rep.th);
719 }
720#endif
721 arg.flags = reply_flags;
722 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
723 ip_hdr(skb)->saddr, /* XXX */
724 arg.iov[0].iov_len, IPPROTO_TCP, 0);
725 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
726 if (oif)
727 arg.bound_dev_if = oif;
728
729 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
730 &arg, arg.iov[0].iov_len);
731
732 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
733}
734
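/* ACK a segment that matched a TIME-WAIT socket, using the sequence numbers,
 * window, timestamp and (if any) MD5 key recorded in the timewait state.
 */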
735static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
736{
737 struct inet_timewait_sock *tw = inet_twsk(sk);
738 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
739
740 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
741 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
742 tcptw->tw_ts_recent,
743 tw->tw_bound_dev_if,
744 tcp_twsk_md5_key(tcptw),
745 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
746 );
747
748 inet_twsk_put(tw);
749}
750
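/* ACK a segment for an embryonic connection that still only exists as a
 * request_sock (SYN_RECV), using the ISNs stored in the request.
 */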
751static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
752 struct request_sock *req)
753{
754 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
755 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
756 req->ts_recent,
757 0,
758 tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
759 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
760}
761
762/*
763 * Send a SYN-ACK after having received a SYN.
764 * This still operates on a request_sock only, not on a big
765 * socket.
766 */
767static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
768 struct request_sock *req,
769 struct request_values *rvp)
770{
771 const struct inet_request_sock *ireq = inet_rsk(req);
772 struct flowi4 fl4;
773 int err = -1;
774 struct sk_buff * skb;
775
776 /* First, grab a route. */
777 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
778 return -1;
779
780 skb = tcp_make_synack(sk, dst, req, rvp);
781
782 if (skb) {
783 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
784
785 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
786 ireq->rmt_addr,
787 ireq->opt);
788 err = net_xmit_eval(err);
789 }
790
791 dst_release(dst);
792 return err;
793}
794
795static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
796 struct request_values *rvp)
797{
798 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
799 return tcp_v4_send_synack(sk, NULL, req, rvp);
800}
801
802/*
803 * IPv4 request_sock destructor.
804 */
805static void tcp_v4_reqsk_destructor(struct request_sock *req)
806{
807 kfree(inet_rsk(req)->opt);
808}
809
810static void syn_flood_warning(const struct sk_buff *skb)
811{
812 const char *msg;
813
814#ifdef CONFIG_SYN_COOKIES
815 if (sysctl_tcp_syncookies)
816 msg = "Sending cookies";
817 else
818#endif
819 msg = "Dropping request";
820
821 pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
822 ntohs(tcp_hdr(skb)->dest), msg);
823}
824
825/*
826 * Save and compile IPv4 options into the request_sock if needed.
827 */
828static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
829 struct sk_buff *skb)
830{
831 const struct ip_options *opt = &(IPCB(skb)->opt);
832 struct ip_options_rcu *dopt = NULL;
833
834 if (opt && opt->optlen) {
835 int opt_size = sizeof(*dopt) + opt->optlen;
836
837 dopt = kmalloc(opt_size, GFP_ATOMIC);
838 if (dopt) {
839 if (ip_options_echo(&dopt->opt, skb)) {
840 kfree(dopt);
841 dopt = NULL;
842 }
843 }
844 }
845 return dopt;
846}
847
848#ifdef CONFIG_TCP_MD5SIG
849/*
850 * RFC2385 MD5 checksumming requires a mapping of
851 * IP address->MD5 Key.
852 * We need to maintain these in the sk structure.
853 */
854
855/* Find the Key structure for an address. */
856static struct tcp_md5sig_key *
857 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
858{
859 struct tcp_sock *tp = tcp_sk(sk);
860 int i;
861
862 if (!tp->md5sig_info || !tp->md5sig_info->entries4)
863 return NULL;
864 for (i = 0; i < tp->md5sig_info->entries4; i++) {
865 if (tp->md5sig_info->keys4[i].addr == addr)
866 return &tp->md5sig_info->keys4[i].base;
867 }
868 return NULL;
869}
870
871struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
872 struct sock *addr_sk)
873{
874 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
875}
876EXPORT_SYMBOL(tcp_v4_md5_lookup);
877
878static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
879 struct request_sock *req)
880{
881 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
882}
883
884/* This can be called on a newly created socket, from other files */
885int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
886 u8 *newkey, u8 newkeylen)
887{
888 /* Add Key to the list */
889 struct tcp_md5sig_key *key;
890 struct tcp_sock *tp = tcp_sk(sk);
891 struct tcp4_md5sig_key *keys;
892
893 key = tcp_v4_md5_do_lookup(sk, addr);
894 if (key) {
895 /* Pre-existing entry - just update that one. */
896 kfree(key->key);
897 key->key = newkey;
898 key->keylen = newkeylen;
899 } else {
900 struct tcp_md5sig_info *md5sig;
901
902 if (!tp->md5sig_info) {
903 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
904 GFP_ATOMIC);
905 if (!tp->md5sig_info) {
906 kfree(newkey);
907 return -ENOMEM;
908 }
909 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
910 }
911 if (tcp_alloc_md5sig_pool(sk) == NULL) {
912 kfree(newkey);
913 return -ENOMEM;
914 }
915 md5sig = tp->md5sig_info;
916
917 if (md5sig->alloced4 == md5sig->entries4) {
918 keys = kmalloc((sizeof(*keys) *
919 (md5sig->entries4 + 1)), GFP_ATOMIC);
920 if (!keys) {
921 kfree(newkey);
922 tcp_free_md5sig_pool();
923 return -ENOMEM;
924 }
925
926 if (md5sig->entries4)
927 memcpy(keys, md5sig->keys4,
928 sizeof(*keys) * md5sig->entries4);
929
930 /* Free old key list, and reference new one */
931 kfree(md5sig->keys4);
932 md5sig->keys4 = keys;
933 md5sig->alloced4++;
934 }
935 md5sig->entries4++;
936 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
937 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
938 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
939 }
940 return 0;
941}
942EXPORT_SYMBOL(tcp_v4_md5_do_add);
943
944static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
945 u8 *newkey, u8 newkeylen)
946{
947 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
948 newkey, newkeylen);
949}
950
951int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
952{
953 struct tcp_sock *tp = tcp_sk(sk);
954 int i;
955
956 for (i = 0; i < tp->md5sig_info->entries4; i++) {
957 if (tp->md5sig_info->keys4[i].addr == addr) {
958 /* Free the key */
959 kfree(tp->md5sig_info->keys4[i].base.key);
960 tp->md5sig_info->entries4--;
961
962 if (tp->md5sig_info->entries4 == 0) {
963 kfree(tp->md5sig_info->keys4);
964 tp->md5sig_info->keys4 = NULL;
965 tp->md5sig_info->alloced4 = 0;
966 } else if (tp->md5sig_info->entries4 != i) {
967 /* Slide the remaining entries down over the removed one */
968 memmove(&tp->md5sig_info->keys4[i],
969 &tp->md5sig_info->keys4[i+1],
970 (tp->md5sig_info->entries4 - i) *
971 sizeof(struct tcp4_md5sig_key));
972 }
973 tcp_free_md5sig_pool();
974 return 0;
975 }
976 }
977 return -ENOENT;
978}
979EXPORT_SYMBOL(tcp_v4_md5_do_del);
980
981static void tcp_v4_clear_md5_list(struct sock *sk)
982{
983 struct tcp_sock *tp = tcp_sk(sk);
984
985 /* Free each key, then the set of keys,
986 * the crypto element, and then decrement our
987 * hold on the last resort crypto.
988 */
989 if (tp->md5sig_info->entries4) {
990 int i;
991 for (i = 0; i < tp->md5sig_info->entries4; i++)
992 kfree(tp->md5sig_info->keys4[i].base.key);
993 tp->md5sig_info->entries4 = 0;
994 tcp_free_md5sig_pool();
995 }
996 if (tp->md5sig_info->keys4) {
997 kfree(tp->md5sig_info->keys4);
998 tp->md5sig_info->keys4 = NULL;
999 tp->md5sig_info->alloced4 = 0;
1000 }
1001}
1002
1003static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1004 int optlen)
1005{
1006 struct tcp_md5sig cmd;
1007 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1008 u8 *newkey;
1009
1010 if (optlen < sizeof(cmd))
1011 return -EINVAL;
1012
1013 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1014 return -EFAULT;
1015
1016 if (sin->sin_family != AF_INET)
1017 return -EINVAL;
1018
1019 if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
1020 if (!tcp_sk(sk)->md5sig_info)
1021 return -ENOENT;
1022 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
1023 }
1024
1025 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1026 return -EINVAL;
1027
1028 if (!tcp_sk(sk)->md5sig_info) {
1029 struct tcp_sock *tp = tcp_sk(sk);
1030 struct tcp_md5sig_info *p;
1031
1032 p = kzalloc(sizeof(*p), sk->sk_allocation);
1033 if (!p)
1034 return -EINVAL;
1035
1036 tp->md5sig_info = p;
1037 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1038 }
1039
1040 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
1041 if (!newkey)
1042 return -ENOMEM;
1043 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
1044 newkey, cmd.tcpm_keylen);
1045}
1046
1047static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1048 __be32 daddr, __be32 saddr, int nbytes)
1049{
1050 struct tcp4_pseudohdr *bp;
1051 struct scatterlist sg;
1052
1053 bp = &hp->md5_blk.ip4;
1054
1055 /*
1056 * 1. the TCP pseudo-header (in the order: source IP address,
1057 * destination IP address, zero-padded protocol number, and
1058 * segment length)
1059 */
1060 bp->saddr = saddr;
1061 bp->daddr = daddr;
1062 bp->pad = 0;
1063 bp->protocol = IPPROTO_TCP;
1064 bp->len = cpu_to_be16(nbytes);
1065
1066 sg_init_one(&sg, bp, sizeof(*bp));
1067 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1068}
1069
1070static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
1071 __be32 daddr, __be32 saddr, struct tcphdr *th)
1072{
1073 struct tcp_md5sig_pool *hp;
1074 struct hash_desc *desc;
1075
1076 hp = tcp_get_md5sig_pool();
1077 if (!hp)
1078 goto clear_hash_noput;
1079 desc = &hp->md5_desc;
1080
1081 if (crypto_hash_init(desc))
1082 goto clear_hash;
1083 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1084 goto clear_hash;
1085 if (tcp_md5_hash_header(hp, th))
1086 goto clear_hash;
1087 if (tcp_md5_hash_key(hp, key))
1088 goto clear_hash;
1089 if (crypto_hash_final(desc, md5_hash))
1090 goto clear_hash;
1091
1092 tcp_put_md5sig_pool();
1093 return 0;
1094
1095clear_hash:
1096 tcp_put_md5sig_pool();
1097clear_hash_noput:
1098 memset(md5_hash, 0, 16);
1099 return 1;
1100}
1101
1102int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1103 struct sock *sk, struct request_sock *req,
1104 struct sk_buff *skb)
1105{
1106 struct tcp_md5sig_pool *hp;
1107 struct hash_desc *desc;
1108 struct tcphdr *th = tcp_hdr(skb);
1109 __be32 saddr, daddr;
1110
1111 if (sk) {
1112 saddr = inet_sk(sk)->inet_saddr;
1113 daddr = inet_sk(sk)->inet_daddr;
1114 } else if (req) {
1115 saddr = inet_rsk(req)->loc_addr;
1116 daddr = inet_rsk(req)->rmt_addr;
1117 } else {
1118 const struct iphdr *iph = ip_hdr(skb);
1119 saddr = iph->saddr;
1120 daddr = iph->daddr;
1121 }
1122
1123 hp = tcp_get_md5sig_pool();
1124 if (!hp)
1125 goto clear_hash_noput;
1126 desc = &hp->md5_desc;
1127
1128 if (crypto_hash_init(desc))
1129 goto clear_hash;
1130
1131 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1132 goto clear_hash;
1133 if (tcp_md5_hash_header(hp, th))
1134 goto clear_hash;
1135 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1136 goto clear_hash;
1137 if (tcp_md5_hash_key(hp, key))
1138 goto clear_hash;
1139 if (crypto_hash_final(desc, md5_hash))
1140 goto clear_hash;
1141
1142 tcp_put_md5sig_pool();
1143 return 0;
1144
1145clear_hash:
1146 tcp_put_md5sig_pool();
1147clear_hash_noput:
1148 memset(md5_hash, 0, 16);
1149 return 1;
1150}
1151EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1152
1153static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1154{
1155 /*
1156 * This gets called for each TCP segment that arrives
1157 * so we want to be efficient.
1158 * We have 3 drop cases:
1159 * o No MD5 hash and one expected.
1160 * o MD5 hash and we're not expecting one.
1161 * o MD5 hash and it's wrong.
1162 */
1163 __u8 *hash_location = NULL;
1164 struct tcp_md5sig_key *hash_expected;
1165 const struct iphdr *iph = ip_hdr(skb);
1166 struct tcphdr *th = tcp_hdr(skb);
1167 int genhash;
1168 unsigned char newhash[16];
1169
1170 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1171 hash_location = tcp_parse_md5sig_option(th);
1172
1173 /* We've parsed the options - do we have a hash? */
1174 if (!hash_expected && !hash_location)
1175 return 0;
1176
1177 if (hash_expected && !hash_location) {
1178 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1179 return 1;
1180 }
1181
1182 if (!hash_expected && hash_location) {
1183 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1184 return 1;
1185 }
1186
1187 /* Okay, so this is hash_expected and hash_location -
1188 * so we need to calculate the MD5 hash.
1189 */
1190 genhash = tcp_v4_md5_hash_skb(newhash,
1191 hash_expected,
1192 NULL, NULL, skb);
1193
1194 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1195 if (net_ratelimit()) {
1196 printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1197 &iph->saddr, ntohs(th->source),
1198 &iph->daddr, ntohs(th->dest),
1199 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1200 }
1201 return 1;
1202 }
1203 return 0;
1204}
1205
1206#endif
1207
1208struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1209 .family = PF_INET,
1210 .obj_size = sizeof(struct tcp_request_sock),
1211 .rtx_syn_ack = tcp_v4_rtx_synack,
1212 .send_ack = tcp_v4_reqsk_send_ack,
1213 .destructor = tcp_v4_reqsk_destructor,
1214 .send_reset = tcp_v4_send_reset,
1215 .syn_ack_timeout = tcp_syn_ack_timeout,
1216};
1217
1218#ifdef CONFIG_TCP_MD5SIG
1219static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1220 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1221 .calc_md5_hash = tcp_v4_md5_hash_skb,
1222};
1223#endif
1224
1225int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1226{
1227 struct tcp_extend_values tmp_ext;
1228 struct tcp_options_received tmp_opt;
1229 u8 *hash_location;
1230 struct request_sock *req;
1231 struct inet_request_sock *ireq;
1232 struct tcp_sock *tp = tcp_sk(sk);
1233 struct dst_entry *dst = NULL;
1234 __be32 saddr = ip_hdr(skb)->saddr;
1235 __be32 daddr = ip_hdr(skb)->daddr;
1236 __u32 isn = TCP_SKB_CB(skb)->when;
1237#ifdef CONFIG_SYN_COOKIES
1238 int want_cookie = 0;
1239#else
1240#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1241#endif
1242
1243 /* Never answer SYNs sent to broadcast or multicast */
1244 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1245 goto drop;
1246
1247 /* TW buckets are converted to open requests without
1248 * limitation; they conserve resources and the peer is
1249 * evidently a real one.
1250 */
1251 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1252 if (net_ratelimit())
1253 syn_flood_warning(skb);
1254#ifdef CONFIG_SYN_COOKIES
1255 if (sysctl_tcp_syncookies) {
1256 want_cookie = 1;
1257 } else
1258#endif
1259 goto drop;
1260 }
1261
1262 /* Accept backlog is full. If we have already queued enough
1263 * warm entries in the syn queue, drop the request. It is better than
1264 * clogging the syn queue with openreqs with exponentially increasing
1265 * timeouts.
1266 */
1267 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1268 goto drop;
1269
1270 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1271 if (!req)
1272 goto drop;
1273
1274#ifdef CONFIG_TCP_MD5SIG
1275 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1276#endif
1277
1278 tcp_clear_options(&tmp_opt);
1279 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1280 tmp_opt.user_mss = tp->rx_opt.user_mss;
1281 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1282
1283 if (tmp_opt.cookie_plus > 0 &&
1284 tmp_opt.saw_tstamp &&
1285 !tp->rx_opt.cookie_out_never &&
1286 (sysctl_tcp_cookie_size > 0 ||
1287 (tp->cookie_values != NULL &&
1288 tp->cookie_values->cookie_desired > 0))) {
1289 u8 *c;
1290 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1291 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1292
1293 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1294 goto drop_and_release;
1295
1296 /* Secret recipe starts with IP addresses */
1297 *mess++ ^= (__force u32)daddr;
1298 *mess++ ^= (__force u32)saddr;
1299
1300 /* plus variable length Initiator Cookie */
1301 c = (u8 *)mess;
1302 while (l-- > 0)
1303 *c++ ^= *hash_location++;
1304
1305#ifdef CONFIG_SYN_COOKIES
1306 want_cookie = 0; /* not our kind of cookie */
1307#endif
1308 tmp_ext.cookie_out_never = 0; /* false */
1309 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1310 } else if (!tp->rx_opt.cookie_in_always) {
1311 /* redundant indications, but ensure initialization. */
1312 tmp_ext.cookie_out_never = 1; /* true */
1313 tmp_ext.cookie_plus = 0;
1314 } else {
1315 goto drop_and_release;
1316 }
1317 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1318
1319 if (want_cookie && !tmp_opt.saw_tstamp)
1320 tcp_clear_options(&tmp_opt);
1321
1322 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1323 tcp_openreq_init(req, &tmp_opt, skb);
1324
1325 ireq = inet_rsk(req);
1326 ireq->loc_addr = daddr;
1327 ireq->rmt_addr = saddr;
1328 ireq->no_srccheck = inet_sk(sk)->transparent;
1329 ireq->opt = tcp_v4_save_options(sk, skb);
1330
1331 if (security_inet_conn_request(sk, skb, req))
1332 goto drop_and_free;
1333
1334 if (!want_cookie || tmp_opt.tstamp_ok)
1335 TCP_ECN_create_request(req, tcp_hdr(skb));
1336
1337 if (want_cookie) {
1338 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1339 req->cookie_ts = tmp_opt.tstamp_ok;
1340 } else if (!isn) {
1341 struct inet_peer *peer = NULL;
1342 struct flowi4 fl4;
1343
1344 /* VJ's idea. We save last timestamp seen
1345 * from the destination in peer table, when entering
1346 * state TIME-WAIT, and check against it before
1347 * accepting new connection request.
1348 *
1349 * If "isn" is not zero, this request hit alive
1350 * timewait bucket, so that all the necessary checks
1351 * are made in the function processing timewait state.
1352 */
1353 if (tmp_opt.saw_tstamp &&
1354 tcp_death_row.sysctl_tw_recycle &&
1355 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1356 fl4.daddr == saddr &&
1357 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1358 inet_peer_refcheck(peer);
1359 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1360 (s32)(peer->tcp_ts - req->ts_recent) >
1361 TCP_PAWS_WINDOW) {
1362 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1363 goto drop_and_release;
1364 }
1365 }
1366 /* Kill the following clause, if you dislike this way. */
1367 else if (!sysctl_tcp_syncookies &&
1368 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1369 (sysctl_max_syn_backlog >> 2)) &&
1370 (!peer || !peer->tcp_ts_stamp) &&
1371 (!dst || !dst_metric(dst, RTAX_RTT))) {
1372 /* Without syncookies, the last quarter of the
1373 * backlog is filled only with destinations
1374 * proven to be alive.
1375 * It means that we continue to communicate
1376 * with destinations already remembered
1377 * at the moment of the synflood.
1378 */
1379 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
1380 &saddr, ntohs(tcp_hdr(skb)->source));
1381 goto drop_and_release;
1382 }
1383
1384 isn = tcp_v4_init_sequence(skb);
1385 }
1386 tcp_rsk(req)->snt_isn = isn;
1387 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1388
1389 if (tcp_v4_send_synack(sk, dst, req,
1390 (struct request_values *)&tmp_ext) ||
1391 want_cookie)
1392 goto drop_and_free;
1393
1394 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1395 return 0;
1396
1397drop_and_release:
1398 dst_release(dst);
1399drop_and_free:
1400 reqsk_free(req);
1401drop:
1402 return 0;
1403}
1404EXPORT_SYMBOL(tcp_v4_conn_request);
1405
1406
1407/*
1408 * The three way handshake has completed - we got a valid synack -
1409 * now create the new socket.
1410 */
1411struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1412 struct request_sock *req,
1413 struct dst_entry *dst)
1414{
1415 struct inet_request_sock *ireq;
1416 struct inet_sock *newinet;
1417 struct tcp_sock *newtp;
1418 struct sock *newsk;
1419#ifdef CONFIG_TCP_MD5SIG
1420 struct tcp_md5sig_key *key;
1421#endif
1422 struct ip_options_rcu *inet_opt;
1423
1424 if (sk_acceptq_is_full(sk))
1425 goto exit_overflow;
1426
1427 newsk = tcp_create_openreq_child(sk, req, skb);
1428 if (!newsk)
1429 goto exit_nonewsk;
1430
1431 newsk->sk_gso_type = SKB_GSO_TCPV4;
1432
1433 newtp = tcp_sk(newsk);
1434 newinet = inet_sk(newsk);
1435 ireq = inet_rsk(req);
1436 newinet->inet_daddr = ireq->rmt_addr;
1437 newinet->inet_rcv_saddr = ireq->loc_addr;
1438 newinet->inet_saddr = ireq->loc_addr;
1439 inet_opt = ireq->opt;
1440 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1441 ireq->opt = NULL;
1442 newinet->mc_index = inet_iif(skb);
1443 newinet->mc_ttl = ip_hdr(skb)->ttl;
1444 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1445 if (inet_opt)
1446 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1447 newinet->inet_id = newtp->write_seq ^ jiffies;
1448
1449 if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
1450 goto put_and_exit;
1451
1452 sk_setup_caps(newsk, dst);
1453
1454 tcp_mtup_init(newsk);
1455 tcp_sync_mss(newsk, dst_mtu(dst));
1456 newtp->advmss = dst_metric_advmss(dst);
1457 if (tcp_sk(sk)->rx_opt.user_mss &&
1458 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1459 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1460
1461 tcp_initialize_rcv_mss(newsk);
1462 if (tcp_rsk(req)->snt_synack)
1463 tcp_valid_rtt_meas(newsk,
1464 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1465 newtp->total_retrans = req->retrans;
1466
1467#ifdef CONFIG_TCP_MD5SIG
1468 /* Copy over the MD5 key from the original socket */
1469 key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
1470 if (key != NULL) {
1471 /*
1472 * We're using one, so create a matching key
1473 * on the newsk structure. If we fail to get
1474 * memory, then we end up not copying the key
1475 * across. Shucks.
1476 */
1477 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1478 if (newkey != NULL)
1479 tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
1480 newkey, key->keylen);
1481 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1482 }
1483#endif
1484
1485 if (__inet_inherit_port(sk, newsk) < 0)
1486 goto put_and_exit;
1487 __inet_hash_nolisten(newsk, NULL);
1488
1489 return newsk;
1490
1491exit_overflow:
1492 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1493exit_nonewsk:
1494 dst_release(dst);
1495exit:
1496 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1497 return NULL;
1498put_and_exit:
1499 sock_put(newsk);
1500 goto exit;
1501}
1502EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1503
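/* For a segment arriving on a listening socket: look up a pending connection
 * request or an already established child socket; with syncookies enabled a
 * bare ACK may also be validated as a cookie.
 */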
1504static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1505{
1506 struct tcphdr *th = tcp_hdr(skb);
1507 const struct iphdr *iph = ip_hdr(skb);
1508 struct sock *nsk;
1509 struct request_sock **prev;
1510 /* Find possible connection requests. */
1511 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1512 iph->saddr, iph->daddr);
1513 if (req)
1514 return tcp_check_req(sk, skb, req, prev);
1515
1516 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1517 th->source, iph->daddr, th->dest, inet_iif(skb));
1518
1519 if (nsk) {
1520 if (nsk->sk_state != TCP_TIME_WAIT) {
1521 bh_lock_sock(nsk);
1522 return nsk;
1523 }
1524 inet_twsk_put(inet_twsk(nsk));
1525 return NULL;
1526 }
1527
1528#ifdef CONFIG_SYN_COOKIES
1529 if (!th->syn)
1530 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1531#endif
1532 return sk;
1533}
1534
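/* Verify the TCP checksum of an incoming skb.  A CHECKSUM_COMPLETE value from
 * the device is checked directly; otherwise the pseudo-header sum is set up
 * and short packets (<= 76 bytes) are verified immediately, leaving longer
 * ones for later checksum completion.
 */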
1535static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1536{
1537 const struct iphdr *iph = ip_hdr(skb);
1538
1539 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1540 if (!tcp_v4_check(skb->len, iph->saddr,
1541 iph->daddr, skb->csum)) {
1542 skb->ip_summed = CHECKSUM_UNNECESSARY;
1543 return 0;
1544 }
1545 }
1546
1547 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1548 skb->len, IPPROTO_TCP, 0);
1549
1550 if (skb->len <= 76) {
1551 return __skb_checksum_complete(skb);
1552 }
1553 return 0;
1554}
1555
1556
1557/* The socket must have its spinlock held when we get
1558 * here.
1559 *
1560 * We have a potential double-lock case here, so even when
1561 * doing backlog processing we use the BH locking scheme.
1562 * This is because we cannot sleep with the original spinlock
1563 * held.
1564 */
1565int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1566{
1567 struct sock *rsk;
1568#ifdef CONFIG_TCP_MD5SIG
1569 /*
1570 * We really want to reject the packet as early as possible
1571 * if:
1572 * o We're expecting an MD5'd packet and there is no MD5 TCP option
1573 * o There is an MD5 option and we're not expecting one
1574 */
1575 if (tcp_v4_inbound_md5_hash(sk, skb))
1576 goto discard;
1577#endif
1578
1579 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1580 sock_rps_save_rxhash(sk, skb->rxhash);
1581 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1582 rsk = sk;
1583 goto reset;
1584 }
1585 return 0;
1586 }
1587
1588 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1589 goto csum_err;
1590
1591 if (sk->sk_state == TCP_LISTEN) {
1592 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1593 if (!nsk)
1594 goto discard;
1595
1596 if (nsk != sk) {
1597 sock_rps_save_rxhash(nsk, skb->rxhash);
1598 if (tcp_child_process(sk, nsk, skb)) {
1599 rsk = nsk;
1600 goto reset;
1601 }
1602 return 0;
1603 }
1604 } else
1605 sock_rps_save_rxhash(sk, skb->rxhash);
1606
1607 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1608 rsk = sk;
1609 goto reset;
1610 }
1611 return 0;
1612
1613reset:
1614 tcp_v4_send_reset(rsk, skb);
1615discard:
1616 kfree_skb(skb);
1617 /* Be careful here. If this function gets more complicated and
1618 * gcc suffers from register pressure on the x86, sk (in %ebx)
1619 * might be destroyed here. This current version compiles correctly,
1620 * but you have been warned.
1621 */
1622 return 0;
1623
1624csum_err:
1625 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1626 goto discard;
1627}
1628EXPORT_SYMBOL(tcp_v4_do_rcv);
1629
1630/*
1631 * From tcp_input.c
1632 */
1633
1634int tcp_v4_rcv(struct sk_buff *skb)
1635{
1636 const struct iphdr *iph;
1637 struct tcphdr *th;
1638 struct sock *sk;
1639 int ret;
1640 struct net *net = dev_net(skb->dev);
1641
1642 if (skb->pkt_type != PACKET_HOST)
1643 goto discard_it;
1644
1645 /* Count it even if it's bad */
1646 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1647
1648 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1649 goto discard_it;
1650
1651 th = tcp_hdr(skb);
1652
1653 if (th->doff < sizeof(struct tcphdr) / 4)
1654 goto bad_packet;
1655 if (!pskb_may_pull(skb, th->doff * 4))
1656 goto discard_it;
1657
1658 /* An explanation is required here, I think.
1659 * Packet length and doff are validated by header prediction,
1660 * provided case of th->doff==0 is eliminated.
1661 * So, we defer the checks. */
1662 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1663 goto bad_packet;
1664
1665 th = tcp_hdr(skb);
1666 iph = ip_hdr(skb);
1667 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1668 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1669 skb->len - th->doff * 4);
1670 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1671 TCP_SKB_CB(skb)->when = 0;
1672 TCP_SKB_CB(skb)->flags = iph->tos;
1673 TCP_SKB_CB(skb)->sacked = 0;
1674
1675 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1676 if (!sk)
1677 goto no_tcp_socket;
1678
1679process:
1680 if (sk->sk_state == TCP_TIME_WAIT)
1681 goto do_time_wait;
1682
1683 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1684 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1685 goto discard_and_relse;
1686 }
1687
1688 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1689 goto discard_and_relse;
1690 nf_reset(skb);
1691
1692 if (sk_filter(sk, skb))
1693 goto discard_and_relse;
1694
1695 skb->dev = NULL;
1696
1697 bh_lock_sock_nested(sk);
1698 ret = 0;
1699 if (!sock_owned_by_user(sk)) {
1700#ifdef CONFIG_NET_DMA
1701 struct tcp_sock *tp = tcp_sk(sk);
1702 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1703 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1704 if (tp->ucopy.dma_chan)
1705 ret = tcp_v4_do_rcv(sk, skb);
1706 else
1707#endif
1708 {
1709 if (!tcp_prequeue(sk, skb))
1710 ret = tcp_v4_do_rcv(sk, skb);
1711 }
1712 } else if (unlikely(sk_add_backlog(sk, skb))) {
1713 bh_unlock_sock(sk);
1714 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1715 goto discard_and_relse;
1716 }
1717 bh_unlock_sock(sk);
1718
1719 sock_put(sk);
1720
1721 return ret;
1722
1723no_tcp_socket:
1724 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1725 goto discard_it;
1726
1727 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1728bad_packet:
1729 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1730 } else {
1731 tcp_v4_send_reset(NULL, skb);
1732 }
1733
1734discard_it:
1735 /* Discard frame. */
1736 kfree_skb(skb);
1737 return 0;
1738
1739discard_and_relse:
1740 sock_put(sk);
1741 goto discard_it;
1742
1743do_time_wait:
1744 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1745 inet_twsk_put(inet_twsk(sk));
1746 goto discard_it;
1747 }
1748
1749 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1750 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1751 inet_twsk_put(inet_twsk(sk));
1752 goto discard_it;
1753 }
1754 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1755 case TCP_TW_SYN: {
1756 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1757 &tcp_hashinfo,
1758 iph->daddr, th->dest,
1759 inet_iif(skb));
1760 if (sk2) {
1761 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1762 inet_twsk_put(inet_twsk(sk));
1763 sk = sk2;
1764 goto process;
1765 }
1766 /* Fall through to ACK */
1767 }
1768 case TCP_TW_ACK:
1769 tcp_v4_timewait_ack(sk, skb);
1770 break;
1771 case TCP_TW_RST:
1772 goto no_tcp_socket;
1773 case TCP_TW_SUCCESS:;
1774 }
1775 goto discard_it;
1776}
1777
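/* Return the inet_peer entry for this connection's destination.  The peer
 * cached on the route is reused when the route still matches the destination;
 * *release_it tells the caller whether it owns a reference to drop.
 */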
1778struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1779{
1780 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1781 struct inet_sock *inet = inet_sk(sk);
1782 struct inet_peer *peer;
1783
1784 if (!rt ||
1785 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1786 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1787 *release_it = true;
1788 } else {
1789 if (!rt->peer)
1790 rt_bind_peer(rt, inet->inet_daddr, 1);
1791 peer = rt->peer;
1792 *release_it = false;
1793 }
1794
1795 return peer;
1796}
1797EXPORT_SYMBOL(tcp_v4_get_peer);
1798
1799void *tcp_v4_tw_get_peer(struct sock *sk)
1800{
1801 struct inet_timewait_sock *tw = inet_twsk(sk);
1802
1803 return inet_getpeer_v4(tw->tw_daddr, 1);
1804}
1805EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1806
1807static struct timewait_sock_ops tcp_timewait_sock_ops = {
1808 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1809 .twsk_unique = tcp_twsk_unique,
1810 .twsk_destructor= tcp_twsk_destructor,
1811 .twsk_getpeer = tcp_v4_tw_get_peer,
1812};
1813
1814const struct inet_connection_sock_af_ops ipv4_specific = {
1815 .queue_xmit = ip_queue_xmit,
1816 .send_check = tcp_v4_send_check,
1817 .rebuild_header = inet_sk_rebuild_header,
1818 .conn_request = tcp_v4_conn_request,
1819 .syn_recv_sock = tcp_v4_syn_recv_sock,
1820 .get_peer = tcp_v4_get_peer,
1821 .net_header_len = sizeof(struct iphdr),
1822 .setsockopt = ip_setsockopt,
1823 .getsockopt = ip_getsockopt,
1824 .addr2sockaddr = inet_csk_addr2sockaddr,
1825 .sockaddr_len = sizeof(struct sockaddr_in),
1826 .bind_conflict = inet_csk_bind_conflict,
1827#ifdef CONFIG_COMPAT
1828 .compat_setsockopt = compat_ip_setsockopt,
1829 .compat_getsockopt = compat_ip_getsockopt,
1830#endif
1831};
1832EXPORT_SYMBOL(ipv4_specific);
1833
1834#ifdef CONFIG_TCP_MD5SIG
1835static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1836 .md5_lookup = tcp_v4_md5_lookup,
1837 .calc_md5_hash = tcp_v4_md5_hash_skb,
1838 .md5_add = tcp_v4_md5_add_func,
1839 .md5_parse = tcp_v4_parse_md5_keys,
1840};
1841#endif
1842
1843/* NOTE: A lot of things are set to zero explicitly by the call to
1844 * sk_alloc(), so they need not be done here.
1845 */
1846static int tcp_v4_init_sock(struct sock *sk)
1847{
1848 struct inet_connection_sock *icsk = inet_csk(sk);
1849 struct tcp_sock *tp = tcp_sk(sk);
1850
1851 skb_queue_head_init(&tp->out_of_order_queue);
1852 tcp_init_xmit_timers(sk);
1853 tcp_prequeue_init(tp);
1854
1855 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1856 tp->mdev = TCP_TIMEOUT_INIT;
1857
1858 /* So many TCP implementations out there (incorrectly) count the
1859 * initial SYN frame in their delayed-ACK and congestion control
1860 * algorithms that we must have the following bandaid to talk
1861 * efficiently to them. -DaveM
1862 */
1863 tp->snd_cwnd = TCP_INIT_CWND;
1864
1865 /* See draft-stevens-tcpca-spec-01 for discussion of the
1866 * initialization of these values.
1867 */
1868 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1869 tp->snd_cwnd_clamp = ~0;
1870 tp->mss_cache = TCP_MSS_DEFAULT;
1871
1872 tp->reordering = sysctl_tcp_reordering;
1873 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1874
1875 sk->sk_state = TCP_CLOSE;
1876
1877 sk->sk_write_space = sk_stream_write_space;
1878 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1879
1880 icsk->icsk_af_ops = &ipv4_specific;
1881 icsk->icsk_sync_mss = tcp_sync_mss;
1882#ifdef CONFIG_TCP_MD5SIG
1883 tp->af_specific = &tcp_sock_ipv4_specific;
1884#endif
1885
1886 /* TCP Cookie Transactions */
1887 if (sysctl_tcp_cookie_size > 0) {
1888 /* Default, cookies without s_data_payload. */
1889 tp->cookie_values =
1890 kzalloc(sizeof(*tp->cookie_values),
1891 sk->sk_allocation);
1892 if (tp->cookie_values != NULL)
1893 kref_init(&tp->cookie_values->kref);
1894 }
1895 /* Presumed zeroed, in order of appearance:
1896 * cookie_in_always, cookie_out_never,
1897 * s_data_constant, s_data_in, s_data_out
1898 */
1899 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1900 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1901
1902 local_bh_disable();
1903 percpu_counter_inc(&tcp_sockets_allocated);
1904 local_bh_enable();
1905
1906 return 0;
1907}
1908
1909void tcp_v4_destroy_sock(struct sock *sk)
1910{
1911 struct tcp_sock *tp = tcp_sk(sk);
1912
1913 tcp_clear_xmit_timers(sk);
1914
1915 tcp_cleanup_congestion_control(sk);
1916
1917 /* Clean up the write buffer. */
1918 tcp_write_queue_purge(sk);
1919
1920 /* Cleans up our, hopefully empty, out_of_order_queue. */
1921 __skb_queue_purge(&tp->out_of_order_queue);
1922
1923#ifdef CONFIG_TCP_MD5SIG
1924 /* Clean up the MD5 key list, if any */
1925 if (tp->md5sig_info) {
1926 tcp_v4_clear_md5_list(sk);
1927 kfree(tp->md5sig_info);
1928 tp->md5sig_info = NULL;
1929 }
1930#endif
1931
1932#ifdef CONFIG_NET_DMA
1933 /* Cleans up our sk_async_wait_queue */
1934 __skb_queue_purge(&sk->sk_async_wait_queue);
1935#endif
1936
1937 /* Clean prequeue, it must be empty really */
1938 __skb_queue_purge(&tp->ucopy.prequeue);
1939
1940 /* Clean up a referenced TCP bind bucket. */
1941 if (inet_csk(sk)->icsk_bind_hash)
1942 inet_put_port(sk);
1943
1944 /*
1945 * If sendmsg cached page exists, toss it.
1946 */
1947 if (sk->sk_sndmsg_page) {
1948 __free_page(sk->sk_sndmsg_page);
1949 sk->sk_sndmsg_page = NULL;
1950 }
1951
1952 /* TCP Cookie Transactions */
1953 if (tp->cookie_values != NULL) {
1954 kref_put(&tp->cookie_values->kref,
1955 tcp_cookie_values_release);
1956 tp->cookie_values = NULL;
1957 }
1958
1959 percpu_counter_dec(&tcp_sockets_allocated);
1960}
1961EXPORT_SYMBOL(tcp_v4_destroy_sock);
1962
1963#ifdef CONFIG_PROC_FS
1964/* Proc filesystem TCP sock list dumping. */
1965
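/* Helpers for walking the time-wait sockets on an ehash chain. */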
1966static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1967{
1968 return hlist_nulls_empty(head) ? NULL :
1969 list_entry(head->first, struct inet_timewait_sock, tw_node);
1970}
1971
1972static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1973{
1974 return !is_a_nulls(tw->tw_node.next) ?
1975 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1976}
1977
1978/*
1979 * Get the next listener socket following cur. If cur is NULL, get the first socket
1980 * starting from bucket given in st->bucket; when st->bucket is zero the
1981 * very first socket in the hash table is returned.
1982 */
1983static void *listening_get_next(struct seq_file *seq, void *cur)
1984{
1985 struct inet_connection_sock *icsk;
1986 struct hlist_nulls_node *node;
1987 struct sock *sk = cur;
1988 struct inet_listen_hashbucket *ilb;
1989 struct tcp_iter_state *st = seq->private;
1990 struct net *net = seq_file_net(seq);
1991
1992 if (!sk) {
1993 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1994 spin_lock_bh(&ilb->lock);
1995 sk = sk_nulls_head(&ilb->head);
1996 st->offset = 0;
1997 goto get_sk;
1998 }
1999 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2000 ++st->num;
2001 ++st->offset;
2002
2003 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2004 struct request_sock *req = cur;
2005
2006 icsk = inet_csk(st->syn_wait_sk);
2007 req = req->dl_next;
2008 while (1) {
2009 while (req) {
2010 if (req->rsk_ops->family == st->family) {
2011 cur = req;
2012 goto out;
2013 }
2014 req = req->dl_next;
2015 }
2016 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2017 break;
2018get_req:
2019 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2020 }
2021 sk = sk_nulls_next(st->syn_wait_sk);
2022 st->state = TCP_SEQ_STATE_LISTENING;
2023 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2024 } else {
2025 icsk = inet_csk(sk);
2026 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2027 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2028 goto start_req;
2029 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2030 sk = sk_nulls_next(sk);
2031 }
2032get_sk:
2033 sk_nulls_for_each_from(sk, node) {
2034 if (!net_eq(sock_net(sk), net))
2035 continue;
2036 if (sk->sk_family == st->family) {
2037 cur = sk;
2038 goto out;
2039 }
2040 icsk = inet_csk(sk);
2041 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2042 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2043start_req:
2044 st->uid = sock_i_uid(sk);
2045 st->syn_wait_sk = sk;
2046 st->state = TCP_SEQ_STATE_OPENREQ;
2047 st->sbucket = 0;
2048 goto get_req;
2049 }
2050 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2051 }
2052 spin_unlock_bh(&ilb->lock);
2053 st->offset = 0;
2054 if (++st->bucket < INET_LHTABLE_SIZE) {
2055 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2056 spin_lock_bh(&ilb->lock);
2057 sk = sk_nulls_head(&ilb->head);
2058 goto get_sk;
2059 }
2060 cur = NULL;
2061out:
2062 return cur;
2063}
2064
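/*
 * Return the *pos'th listening socket (counting from zero), restarting
 * the scan at bucket zero.
 */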
2065static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2066{
2067 struct tcp_iter_state *st = seq->private;
2068 void *rc;
2069
2070 st->bucket = 0;
2071 st->offset = 0;
2072 rc = listening_get_next(seq, NULL);
2073
2074 while (rc && *pos) {
2075 rc = listening_get_next(seq, rc);
2076 --*pos;
2077 }
2078 return rc;
2079}
2080
2081static inline int empty_bucket(struct tcp_iter_state *st)
2082{
2083 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2084 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2085}
2086
2087/*
2088 * Get first established socket starting from bucket given in st->bucket.
2089 * Get the first established socket, starting from the bucket given in st->bucket.
2090 */
2091static void *established_get_first(struct seq_file *seq)
2092{
2093 struct tcp_iter_state *st = seq->private;
2094 struct net *net = seq_file_net(seq);
2095 void *rc = NULL;
2096
2097 st->offset = 0;
2098 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2099 struct sock *sk;
2100 struct hlist_nulls_node *node;
2101 struct inet_timewait_sock *tw;
2102 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2103
2104 /* Lockless fast path for the common case of empty buckets */
2105 if (empty_bucket(st))
2106 continue;
2107
2108 spin_lock_bh(lock);
2109 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2110 if (sk->sk_family != st->family ||
2111 !net_eq(sock_net(sk), net)) {
2112 continue;
2113 }
2114 rc = sk;
2115 goto out;
2116 }
2117 st->state = TCP_SEQ_STATE_TIME_WAIT;
2118 inet_twsk_for_each(tw, node,
2119 &tcp_hashinfo.ehash[st->bucket].twchain) {
2120 if (tw->tw_family != st->family ||
2121 !net_eq(twsk_net(tw), net)) {
2122 continue;
2123 }
2124 rc = tw;
2125 goto out;
2126 }
2127 spin_unlock_bh(lock);
2128 st->state = TCP_SEQ_STATE_ESTABLISHED;
2129 }
2130out:
2131 return rc;
2132}
2133
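/*
 * Advance from cur to the next matching socket: first along the
 * established chain, then along the bucket's TIME_WAIT chain, then on
 * to the next non-empty bucket.  The bucket lock is handed over from
 * one bucket to the next; NULL means the hash has been exhausted.
 */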
2134static void *established_get_next(struct seq_file *seq, void *cur)
2135{
2136 struct sock *sk = cur;
2137 struct inet_timewait_sock *tw;
2138 struct hlist_nulls_node *node;
2139 struct tcp_iter_state *st = seq->private;
2140 struct net *net = seq_file_net(seq);
2141
2142 ++st->num;
2143 ++st->offset;
2144
2145 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2146 tw = cur;
2147 tw = tw_next(tw);
2148get_tw:
2149 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2150 tw = tw_next(tw);
2151 }
2152 if (tw) {
2153 cur = tw;
2154 goto out;
2155 }
2156 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2157 st->state = TCP_SEQ_STATE_ESTABLISHED;
2158
2159		/* Look for the next non-empty bucket */
2160 st->offset = 0;
2161 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2162 empty_bucket(st))
2163 ;
2164 if (st->bucket > tcp_hashinfo.ehash_mask)
2165 return NULL;
2166
2167 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2168 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2169 } else
2170 sk = sk_nulls_next(sk);
2171
2172 sk_nulls_for_each_from(sk, node) {
2173 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2174 goto found;
2175 }
2176
2177 st->state = TCP_SEQ_STATE_TIME_WAIT;
2178 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2179 goto get_tw;
2180found:
2181 cur = sk;
2182out:
2183 return cur;
2184}
2185
2186static void *established_get_idx(struct seq_file *seq, loff_t pos)
2187{
2188 struct tcp_iter_state *st = seq->private;
2189 void *rc;
2190
2191 st->bucket = 0;
2192 rc = established_get_first(seq);
2193
2194 while (rc && pos) {
2195 rc = established_get_next(seq, rc);
2196 --pos;
2197 }
2198 return rc;
2199}
2200
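/*
 * Map a flat /proc position onto the two hash tables: listening
 * sockets come first, established and TIME_WAIT sockets after them.
 */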
2201static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2202{
2203 void *rc;
2204 struct tcp_iter_state *st = seq->private;
2205
2206 st->state = TCP_SEQ_STATE_LISTENING;
2207 rc = listening_get_idx(seq, &pos);
2208
2209 if (!rc) {
2210 st->state = TCP_SEQ_STATE_ESTABLISHED;
2211 rc = established_get_idx(seq, pos);
2212 }
2213
2214 return rc;
2215}
2216
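/*
 * Try to resume the iteration at the bucket/offset remembered from the
 * previous read, so that sequential reads of large tables do not rescan
 * everything from the start.  st->num (the global entry counter) is
 * preserved across the seek.
 */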
2217static void *tcp_seek_last_pos(struct seq_file *seq)
2218{
2219 struct tcp_iter_state *st = seq->private;
2220 int offset = st->offset;
2221 int orig_num = st->num;
2222 void *rc = NULL;
2223
2224 switch (st->state) {
2225 case TCP_SEQ_STATE_OPENREQ:
2226 case TCP_SEQ_STATE_LISTENING:
2227 if (st->bucket >= INET_LHTABLE_SIZE)
2228 break;
2229 st->state = TCP_SEQ_STATE_LISTENING;
2230 rc = listening_get_next(seq, NULL);
2231 while (offset-- && rc)
2232 rc = listening_get_next(seq, rc);
2233 if (rc)
2234 break;
2235 st->bucket = 0;
2236 /* Fallthrough */
2237 case TCP_SEQ_STATE_ESTABLISHED:
2238 case TCP_SEQ_STATE_TIME_WAIT:
2239 st->state = TCP_SEQ_STATE_ESTABLISHED;
2240 if (st->bucket > tcp_hashinfo.ehash_mask)
2241 break;
2242 rc = established_get_first(seq);
2243 while (offset-- && rc)
2244 rc = established_get_next(seq, rc);
2245 }
2246
2247 st->num = orig_num;
2248
2249 return rc;
2250}
2251
2252static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2253{
2254 struct tcp_iter_state *st = seq->private;
2255 void *rc;
2256
2257 if (*pos && *pos == st->last_pos) {
2258 rc = tcp_seek_last_pos(seq);
2259 if (rc)
2260 goto out;
2261 }
2262
2263 st->state = TCP_SEQ_STATE_LISTENING;
2264 st->num = 0;
2265 st->bucket = 0;
2266 st->offset = 0;
2267 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2268
2269out:
2270 st->last_pos = *pos;
2271 return rc;
2272}
2273
2274static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2275{
2276 struct tcp_iter_state *st = seq->private;
2277 void *rc = NULL;
2278
2279 if (v == SEQ_START_TOKEN) {
2280 rc = tcp_get_idx(seq, 0);
2281 goto out;
2282 }
2283
2284 switch (st->state) {
2285 case TCP_SEQ_STATE_OPENREQ:
2286 case TCP_SEQ_STATE_LISTENING:
2287 rc = listening_get_next(seq, v);
2288 if (!rc) {
2289 st->state = TCP_SEQ_STATE_ESTABLISHED;
2290 st->bucket = 0;
2291 st->offset = 0;
2292 rc = established_get_first(seq);
2293 }
2294 break;
2295 case TCP_SEQ_STATE_ESTABLISHED:
2296 case TCP_SEQ_STATE_TIME_WAIT:
2297 rc = established_get_next(seq, v);
2298 break;
2299 }
2300out:
2301 ++*pos;
2302 st->last_pos = *pos;
2303 return rc;
2304}
2305
2306static void tcp_seq_stop(struct seq_file *seq, void *v)
2307{
2308 struct tcp_iter_state *st = seq->private;
2309
2310 switch (st->state) {
2311 case TCP_SEQ_STATE_OPENREQ:
2312 if (v) {
2313 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2314 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2315 }
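		/* fall through: the listening bucket lock is held too */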
2316 case TCP_SEQ_STATE_LISTENING:
2317 if (v != SEQ_START_TOKEN)
2318 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2319 break;
2320 case TCP_SEQ_STATE_TIME_WAIT:
2321 case TCP_SEQ_STATE_ESTABLISHED:
2322 if (v)
2323 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2324 break;
2325 }
2326}
2327
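/*
 * ->open handler shared by every per-family TCP /proc file: allocate
 * the iterator state and record which address family to dump.
 */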
2328static int tcp_seq_open(struct inode *inode, struct file *file)
2329{
2330 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2331 struct tcp_iter_state *s;
2332 int err;
2333
2334 err = seq_open_net(inode, file, &afinfo->seq_ops,
2335 sizeof(struct tcp_iter_state));
2336 if (err < 0)
2337 return err;
2338
2339 s = ((struct seq_file *)file->private_data)->private;
2340 s->family = afinfo->family;
2341 s->last_pos = 0;
2342 return 0;
2343}
2344
2345int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2346{
2347 int rc = 0;
2348 struct proc_dir_entry *p;
2349
2350 afinfo->seq_fops.open = tcp_seq_open;
2351 afinfo->seq_fops.read = seq_read;
2352 afinfo->seq_fops.llseek = seq_lseek;
2353 afinfo->seq_fops.release = seq_release_net;
2354
2355 afinfo->seq_ops.start = tcp_seq_start;
2356 afinfo->seq_ops.next = tcp_seq_next;
2357 afinfo->seq_ops.stop = tcp_seq_stop;
2358
2359 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2360 &afinfo->seq_fops, afinfo);
2361 if (!p)
2362 rc = -ENOMEM;
2363 return rc;
2364}
2365EXPORT_SYMBOL(tcp_proc_register);
2366
2367void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2368{
2369 proc_net_remove(net, afinfo->name);
2370}
2371EXPORT_SYMBOL(tcp_proc_unregister);
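
/*
 * Usage sketch (illustrative only, hypothetical names): a caller
 * registers its own dump by filling in a tcp_seq_afinfo and calling
 * tcp_proc_register() from its pernet init hook, exactly as
 * tcp4_proc_init_net() does below for AF_INET:
 *
 *	static struct tcp_seq_afinfo my_seq_afinfo = {
 *		.name    = "tcp_example",
 *		.family  = AF_INET,
 *		.seq_ops = { .show = my_seq_show },
 *	};
 *
 *	static int __net_init my_proc_init_net(struct net *net)
 *	{
 *		return tcp_proc_register(net, &my_seq_afinfo);
 *	}
 */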
2372
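/*
 * The three helpers below each format one line of /proc/net/tcp: an
 * embryonic open request, a full socket, and a TIME_WAIT socket
 * respectively.  tcp4_seq_show() picks the right one from the iterator
 * state.
 */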
2373static void get_openreq4(struct sock *sk, struct request_sock *req,
2374 struct seq_file *f, int i, int uid, int *len)
2375{
2376 const struct inet_request_sock *ireq = inet_rsk(req);
2377 int ttd = req->expires - jiffies;
2378
2379 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2380 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2381 i,
2382 ireq->loc_addr,
2383 ntohs(inet_sk(sk)->inet_sport),
2384 ireq->rmt_addr,
2385 ntohs(ireq->rmt_port),
2386 TCP_SYN_RECV,
2387 0, 0, /* could print option size, but that is af dependent. */
2388 1, /* timers active (only the expire timer) */
2389 jiffies_to_clock_t(ttd),
2390 req->retrans,
2391 uid,
2392 0, /* non standard timer */
2393 0, /* open_requests have no inode */
2394 atomic_read(&sk->sk_refcnt),
2395 req,
2396 len);
2397}
2398
2399static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2400{
2401 int timer_active;
2402 unsigned long timer_expires;
2403 struct tcp_sock *tp = tcp_sk(sk);
2404 const struct inet_connection_sock *icsk = inet_csk(sk);
2405 struct inet_sock *inet = inet_sk(sk);
2406 __be32 dest = inet->inet_daddr;
2407 __be32 src = inet->inet_rcv_saddr;
2408 __u16 destp = ntohs(inet->inet_dport);
2409 __u16 srcp = ntohs(inet->inet_sport);
2410 int rx_queue;
2411
2412 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2413 timer_active = 1;
2414 timer_expires = icsk->icsk_timeout;
2415 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2416 timer_active = 4;
2417 timer_expires = icsk->icsk_timeout;
2418 } else if (timer_pending(&sk->sk_timer)) {
2419 timer_active = 2;
2420 timer_expires = sk->sk_timer.expires;
2421 } else {
2422 timer_active = 0;
2423 timer_expires = jiffies;
2424 }
2425
2426 if (sk->sk_state == TCP_LISTEN)
2427 rx_queue = sk->sk_ack_backlog;
2428 else
2429 /*
2430		 * Because we don't lock the socket, we might find a transient negative value.
2431 */
2432 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2433
2434 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2435 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2436 i, src, srcp, dest, destp, sk->sk_state,
2437 tp->write_seq - tp->snd_una,
2438 rx_queue,
2439 timer_active,
2440 jiffies_to_clock_t(timer_expires - jiffies),
2441 icsk->icsk_retransmits,
2442 sock_i_uid(sk),
2443 icsk->icsk_probes_out,
2444 sock_i_ino(sk),
2445 atomic_read(&sk->sk_refcnt), sk,
2446 jiffies_to_clock_t(icsk->icsk_rto),
2447 jiffies_to_clock_t(icsk->icsk_ack.ato),
2448 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2449 tp->snd_cwnd,
2450 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2451 len);
2452}
2453
2454static void get_timewait4_sock(struct inet_timewait_sock *tw,
2455 struct seq_file *f, int i, int *len)
2456{
2457 __be32 dest, src;
2458 __u16 destp, srcp;
2459 int ttd = tw->tw_ttd - jiffies;
2460
2461 if (ttd < 0)
2462 ttd = 0;
2463
2464 dest = tw->tw_daddr;
2465 src = tw->tw_rcv_saddr;
2466 destp = ntohs(tw->tw_dport);
2467 srcp = ntohs(tw->tw_sport);
2468
2469 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2470 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2471 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2472 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2473 atomic_read(&tw->tw_refcnt), tw, len);
2474}
2475
2476#define TMPSZ 150
2477
2478static int tcp4_seq_show(struct seq_file *seq, void *v)
2479{
2480 struct tcp_iter_state *st;
2481 int len;
2482
2483 if (v == SEQ_START_TOKEN) {
2484 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2485 " sl local_address rem_address st tx_queue "
2486 "rx_queue tr tm->when retrnsmt uid timeout "
2487 "inode");
2488 goto out;
2489 }
2490 st = seq->private;
2491
2492 switch (st->state) {
2493 case TCP_SEQ_STATE_LISTENING:
2494 case TCP_SEQ_STATE_ESTABLISHED:
2495 get_tcp4_sock(v, seq, st->num, &len);
2496 break;
2497 case TCP_SEQ_STATE_OPENREQ:
2498 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2499 break;
2500 case TCP_SEQ_STATE_TIME_WAIT:
2501 get_timewait4_sock(v, seq, st->num, &len);
2502 break;
2503 }
2504 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2505out:
2506 return 0;
2507}
2508
2509static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2510 .name = "tcp",
2511 .family = AF_INET,
2512 .seq_fops = {
2513 .owner = THIS_MODULE,
2514 },
2515 .seq_ops = {
2516 .show = tcp4_seq_show,
2517 },
2518};
2519
2520static int __net_init tcp4_proc_init_net(struct net *net)
2521{
2522 return tcp_proc_register(net, &tcp4_seq_afinfo);
2523}
2524
2525static void __net_exit tcp4_proc_exit_net(struct net *net)
2526{
2527 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2528}
2529
2530static struct pernet_operations tcp4_net_ops = {
2531 .init = tcp4_proc_init_net,
2532 .exit = tcp4_proc_exit_net,
2533};
2534
2535int __init tcp4_proc_init(void)
2536{
2537 return register_pernet_subsys(&tcp4_net_ops);
2538}
2539
2540void tcp4_proc_exit(void)
2541{
2542 unregister_pernet_subsys(&tcp4_net_ops);
2543}
2544#endif /* CONFIG_PROC_FS */
2545
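/*
 * GRO entry points for IPv4 TCP: verify (or give up on) the checksum of
 * the aggregated segments and hand off to the generic tcp_gro_receive()
 * and tcp_gro_complete() helpers.  They are wired up as the protocol's
 * gro_receive/gro_complete callbacks from the AF_INET init code (see
 * af_inet.c in this tree).
 */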
2546struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2547{
2548 const struct iphdr *iph = skb_gro_network_header(skb);
2549
2550 switch (skb->ip_summed) {
2551 case CHECKSUM_COMPLETE:
2552 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2553 skb->csum)) {
2554 skb->ip_summed = CHECKSUM_UNNECESSARY;
2555 break;
2556 }
2557
2558 /* fall through */
2559 case CHECKSUM_NONE:
2560 NAPI_GRO_CB(skb)->flush = 1;
2561 return NULL;
2562 }
2563
2564 return tcp_gro_receive(head, skb);
2565}
2566
2567int tcp4_gro_complete(struct sk_buff *skb)
2568{
2569 const struct iphdr *iph = ip_hdr(skb);
2570 struct tcphdr *th = tcp_hdr(skb);
2571
2572 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2573 iph->saddr, iph->daddr, 0);
2574 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2575
2576 return tcp_gro_complete(skb);
2577}
2578
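/*
 * The IPv4 TCP protocol descriptor: this is what ties the generic
 * socket layer to the functions in this file.  It is registered from
 * the AF_INET initialisation path.
 */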
2579struct proto tcp_prot = {
2580 .name = "TCP",
2581 .owner = THIS_MODULE,
2582 .close = tcp_close,
2583 .connect = tcp_v4_connect,
2584 .disconnect = tcp_disconnect,
2585 .accept = inet_csk_accept,
2586 .ioctl = tcp_ioctl,
2587 .init = tcp_v4_init_sock,
2588 .destroy = tcp_v4_destroy_sock,
2589 .shutdown = tcp_shutdown,
2590 .setsockopt = tcp_setsockopt,
2591 .getsockopt = tcp_getsockopt,
2592 .recvmsg = tcp_recvmsg,
2593 .sendmsg = tcp_sendmsg,
2594 .sendpage = tcp_sendpage,
2595 .backlog_rcv = tcp_v4_do_rcv,
2596 .hash = inet_hash,
2597 .unhash = inet_unhash,
2598 .get_port = inet_csk_get_port,
2599 .enter_memory_pressure = tcp_enter_memory_pressure,
2600 .sockets_allocated = &tcp_sockets_allocated,
2601 .orphan_count = &tcp_orphan_count,
2602 .memory_allocated = &tcp_memory_allocated,
2603 .memory_pressure = &tcp_memory_pressure,
2604 .sysctl_mem = sysctl_tcp_mem,
2605 .sysctl_wmem = sysctl_tcp_wmem,
2606 .sysctl_rmem = sysctl_tcp_rmem,
2607 .max_header = MAX_TCP_HEADER,
2608 .obj_size = sizeof(struct tcp_sock),
2609 .slab_flags = SLAB_DESTROY_BY_RCU,
2610 .twsk_prot = &tcp_timewait_sock_ops,
2611 .rsk_prot = &tcp_request_sock_ops,
2612 .h.hashinfo = &tcp_hashinfo,
2613 .no_autobind = true,
2614#ifdef CONFIG_COMPAT
2615 .compat_setsockopt = compat_tcp_setsockopt,
2616 .compat_getsockopt = compat_tcp_getsockopt,
2617#endif
2618};
2619EXPORT_SYMBOL(tcp_prot);
2620
2621
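/*
 * Per-namespace setup: each network namespace gets a kernel-internal
 * TCP control socket, used to send RSTs and ACKs that are not
 * associated with any real socket.  tcp_sk_exit_batch() flushes the
 * remaining TIME_WAIT sockets when a batch of namespaces goes away.
 */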
2622static int __net_init tcp_sk_init(struct net *net)
2623{
2624 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2625 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2626}
2627
2628static void __net_exit tcp_sk_exit(struct net *net)
2629{
2630 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2631}
2632
2633static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2634{
2635 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2636}
2637
2638static struct pernet_operations __net_initdata tcp_sk_ops = {
2639 .init = tcp_sk_init,
2640 .exit = tcp_sk_exit,
2641 .exit_batch = tcp_sk_exit_batch,
2642};
2643
2644void __init tcp_v4_init(void)
2645{
2646 inet_hashinfo_init(&tcp_hashinfo);
2647 if (register_pernet_subsys(&tcp_sk_ops))
2648 panic("Failed to create the TCP control socket.\n");
2649}