/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */


#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
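	/* Note: write_seq below starts just past the old connection's
	 * snd_nxt plus one maximal (unscaled) window, so stray segments
	 * from the old incarnation cannot fall inside the new sequence
	 * space.
	 */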
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
		struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the
	 * hash tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big messages
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can happen, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
	 * to be considered hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

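/* Fill in the TCP checksum of an outgoing segment.  With
 * CHECKSUM_PARTIAL only the pseudo-header sum is stored (the device
 * or GSO finishes the job later); otherwise the checksum is computed
 * in software over the header and payload.
 */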
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = sizeof(struct tcphdr) / 4;
	rep.th.rst = 1;

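	/* Per RFC 793: if the incoming segment carried an ACK, the RST
	 * takes its sequence number from that ACK; otherwise the RST
	 * carries seq 0 (from the memset above) and ACKs everything the
	 * segment occupied, counting the SYN/FIN flags as one unit each.
	 */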
	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = arg.iov[0].iov_len / 4;
	rep.th.seq = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack = 1;
	rep.th.window = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

static void syn_flood_warning(const struct sk_buff *skb)
{
	const char *msg;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		msg = "Sending cookies";
	else
#endif
		msg = "Dropping request";

	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
		ntohs(tcp_hdr(skb)->dest), msg);
}

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

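/* RFC 2385 hashes, in this order: the TCP pseudo-header, the TCP
 * header with the checksum treated as zero, the segment payload (if
 * any), and finally the key itself.  The helpers below feed those
 * pieces to the MD5 transform one scatterlist at a time.
 */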
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family = PF_INET,
	.obj_size = sizeof(struct tcp_request_sock),
	.rtx_syn_ack = tcp_v4_rtx_synack,
	.send_ack = tcp_v4_reqsk_send_ack,
	.destructor = tcp_v4_reqsk_destructor,
	.send_reset = tcp_v4_send_reset,
	.syn_ack_timeout = tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup = tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash = tcp_v4_md5_hash_skb,
};
#endif

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}
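	/* With syncookies the connection state is encoded in the ISN of
	 * the SYN-ACK instead of being queued, so a full SYN queue is
	 * survivable: the request is rebuilt from the returning ACK.
	 */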

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop the request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr &&
		    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered at the
			 * moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);
	newinet->inet_daddr = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr = ireq->loc_addr;
	inet_opt = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt = NULL;
	newinet->mc_index = inet_iif(skb);
	newinet->mc_ttl = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
		goto put_and_exit;

	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

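/* Map an incoming segment on a listening socket to the right socket:
 * first the pending-request (SYN) table, then the established hash
 * (an earlier packet may already have created the child), and as a
 * last resort the syncookie check for a stray ACK.
 */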
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

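/* Validate the TCP checksum of an incoming segment.  If the device
 * already summed the payload (CHECKSUM_COMPLETE) we verify it right
 * away; otherwise we store the pseudo-header sum and defer full
 * verification, except for short (<= 76 byte) packets, which are
 * checked immediately.
 */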
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}


/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
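	/* Cache the values the fast path needs in the skb control block.
	 * Note that end_seq counts the SYN and FIN flags as one sequence
	 * unit each, per RFC 793.
	 */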
1da177e4
LT
1667 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1668 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1669 skb->len - th->doff * 4);
1670 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1671 TCP_SKB_CB(skb)->when = 0;
eddc9ec5 1672 TCP_SKB_CB(skb)->flags = iph->tos;
1da177e4
LT
1673 TCP_SKB_CB(skb)->sacked = 0;
1674
9a1f27c4 1675 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1676 if (!sk)
1677 goto no_tcp_socket;
1678
bb134d5d
ED
1679process:
1680 if (sk->sk_state == TCP_TIME_WAIT)
1681 goto do_time_wait;
1682
6cce09f8
ED
1683 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1684 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1685 goto discard_and_relse;
6cce09f8 1686 }
d218d111 1687
1da177e4
LT
1688 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1689 goto discard_and_relse;
b59c2701 1690 nf_reset(skb);
1da177e4 1691
fda9ef5d 1692 if (sk_filter(sk, skb))
1da177e4
LT
1693 goto discard_and_relse;
1694
1695 skb->dev = NULL;
1696
c6366184 1697 bh_lock_sock_nested(sk);
1da177e4
LT
1698 ret = 0;
1699 if (!sock_owned_by_user(sk)) {
1a2449a8
CL
1700#ifdef CONFIG_NET_DMA
1701 struct tcp_sock *tp = tcp_sk(sk);
1702 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1703 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1a2449a8 1704 if (tp->ucopy.dma_chan)
1da177e4 1705 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8
CL
1706 else
1707#endif
1708 {
1709 if (!tcp_prequeue(sk, skb))
ae8d7f88 1710 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8 1711 }
6cce09f8 1712 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1713 bh_unlock_sock(sk);
6cce09f8 1714 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1715 goto discard_and_relse;
1716 }
1da177e4
LT
1717 bh_unlock_sock(sk);
1718
1719 sock_put(sk);
1720
1721 return ret;
1722
1723no_tcp_socket:
1724 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1725 goto discard_it;
1726
1727 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1728bad_packet:
63231bdd 1729 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1730 } else {
cfb6eeb4 1731 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1732 }
1733
1734discard_it:
1735 /* Discard frame. */
1736 kfree_skb(skb);
e905a9ed 1737 return 0;
1da177e4
LT
1738
1739discard_and_relse:
1740 sock_put(sk);
1741 goto discard_it;
1742
1743do_time_wait:
1744 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1745 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1746 goto discard_it;
1747 }
1748
1749 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
63231bdd 1750 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1751 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1752 goto discard_it;
1753 }
9469c7b4 1754 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1755 case TCP_TW_SYN: {
c346dca1 1756 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
c67499c0 1757 &tcp_hashinfo,
eddc9ec5 1758 iph->daddr, th->dest,
463c84b9 1759 inet_iif(skb));
1da177e4 1760 if (sk2) {
9469c7b4
YH
1761 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1762 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1763 sk = sk2;
1764 goto process;
1765 }
1766 /* Fall through to ACK */
1767 }
1768 case TCP_TW_ACK:
1769 tcp_v4_timewait_ack(sk, skb);
1770 break;
1771 case TCP_TW_RST:
1772 goto no_tcp_socket;
1773 case TCP_TW_SUCCESS:;
1774 }
1775 goto discard_it;
1776}
1777
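/*
 * tcp_v4_get_peer() returns the inet_peer entry for this connection's
 * destination.  If the cached route still matches the destination, the
 * peer already bound to the route is reused and *release_it is left
 * false; otherwise a fresh reference is taken with inet_getpeer_v4()
 * and the caller is told to drop it when done.
 */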
3f419d2d 1778struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1da177e4 1779{
3f419d2d 1780 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1da177e4 1781 struct inet_sock *inet = inet_sk(sk);
3f419d2d 1782 struct inet_peer *peer;
1da177e4 1783
c5216cc7
DM
1784 if (!rt ||
1785 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
b534ecf1 1786 peer = inet_getpeer_v4(inet->inet_daddr, 1);
3f419d2d 1787 *release_it = true;
1da177e4
LT
1788 } else {
1789 if (!rt->peer)
a48eff12 1790 rt_bind_peer(rt, inet->inet_daddr, 1);
1da177e4 1791 peer = rt->peer;
3f419d2d 1792 *release_it = false;
1da177e4
LT
1793 }
1794
3f419d2d 1795 return peer;
1da177e4 1796}
3f419d2d 1797EXPORT_SYMBOL(tcp_v4_get_peer);
1da177e4 1798
ccb7c410 1799void *tcp_v4_tw_get_peer(struct sock *sk)
1da177e4 1800{
ccb7c410 1801 struct inet_timewait_sock *tw = inet_twsk(sk);
1da177e4 1802
ccb7c410 1803 return inet_getpeer_v4(tw->tw_daddr, 1);
1da177e4 1804}
ccb7c410
DM
1805EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1806
1807static struct timewait_sock_ops tcp_timewait_sock_ops = {
1808 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1809 .twsk_unique = tcp_twsk_unique,
1810 .twsk_destructor= tcp_twsk_destructor,
1811 .twsk_getpeer = tcp_v4_tw_get_peer,
1812};
1da177e4 1813
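/*
 * ipv4_specific is the address-family specific operations table that
 * inet_connection_sock dispatches through for TCP over IPv4: transmit,
 * header rebuild, connection-request handling, peer lookup and the
 * (compat) socket-option hooks all go through it.
 */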
3b401a81 1814const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1815 .queue_xmit = ip_queue_xmit,
1816 .send_check = tcp_v4_send_check,
1817 .rebuild_header = inet_sk_rebuild_header,
1818 .conn_request = tcp_v4_conn_request,
1819 .syn_recv_sock = tcp_v4_syn_recv_sock,
3f419d2d 1820 .get_peer = tcp_v4_get_peer,
543d9cfe
ACM
1821 .net_header_len = sizeof(struct iphdr),
1822 .setsockopt = ip_setsockopt,
1823 .getsockopt = ip_getsockopt,
1824 .addr2sockaddr = inet_csk_addr2sockaddr,
1825 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1826 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1827#ifdef CONFIG_COMPAT
543d9cfe
ACM
1828 .compat_setsockopt = compat_ip_setsockopt,
1829 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1830#endif
1da177e4 1831};
4bc2f18b 1832EXPORT_SYMBOL(ipv4_specific);
1da177e4 1833
cfb6eeb4 1834#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1835static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1836 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1837 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1838 .md5_add = tcp_v4_md5_add_func,
1839 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1840};
b6332e6c 1841#endif
cfb6eeb4 1842
1da177e4
LT
1843/* NOTE: A lot of things are set to zero explicitly by the call to
1844 * sk_alloc(), so they need not be done here.
1845 */
1846static int tcp_v4_init_sock(struct sock *sk)
1847{
6687e988 1848 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1849 struct tcp_sock *tp = tcp_sk(sk);
1850
1851 skb_queue_head_init(&tp->out_of_order_queue);
1852 tcp_init_xmit_timers(sk);
1853 tcp_prequeue_init(tp);
1854
6687e988 1855 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1856 tp->mdev = TCP_TIMEOUT_INIT;
1857
1858 /* So many TCP implementations out there (incorrectly) count the
1859 * initial SYN frame in their delayed-ACK and congestion control
1860 * algorithms that we must have the following bandaid to talk
1861 * efficiently to them. -DaveM
1862 */
9ad7c049 1863 tp->snd_cwnd = TCP_INIT_CWND;
1da177e4
LT
1864
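	/* TCP_INIT_CWND is defined in include/net/tcp.h (ten segments in
	 * trees of this vintage, following the IW10 proposal); the header
	 * is authoritative for the exact value. */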
1865 /* See draft-stevens-tcpca-spec-01 for discussion of the
1866 * initialization of these values.
1867 */
0b6a05c1 1868 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1869 tp->snd_cwnd_clamp = ~0;
bee7ca9e 1870 tp->mss_cache = TCP_MSS_DEFAULT;
1da177e4
LT
1871
1872 tp->reordering = sysctl_tcp_reordering;
6687e988 1873 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1874
1875 sk->sk_state = TCP_CLOSE;
1876
1877 sk->sk_write_space = sk_stream_write_space;
1878 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1879
8292a17a 1880 icsk->icsk_af_ops = &ipv4_specific;
d83d8461 1881 icsk->icsk_sync_mss = tcp_sync_mss;
cfb6eeb4
YH
1882#ifdef CONFIG_TCP_MD5SIG
1883 tp->af_specific = &tcp_sock_ipv4_specific;
1884#endif
1da177e4 1885
435cf559
WAS
1886 /* TCP Cookie Transactions */
1887 if (sysctl_tcp_cookie_size > 0) {
1888 /* Default, cookies without s_data_payload. */
1889 tp->cookie_values =
1890 kzalloc(sizeof(*tp->cookie_values),
1891 sk->sk_allocation);
1892 if (tp->cookie_values != NULL)
1893 kref_init(&tp->cookie_values->kref);
1894 }
1895 /* Presumed zeroed, in order of appearance:
1896 * cookie_in_always, cookie_out_never,
1897 * s_data_constant, s_data_in, s_data_out
1898 */
1da177e4
LT
1899 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1900 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1901
eb4dea58 1902 local_bh_disable();
1748376b 1903 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1904 local_bh_enable();
1da177e4
LT
1905
1906 return 0;
1907}
1908
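/*
 * tcp_v4_destroy_sock() undoes what tcp_v4_init_sock() and the life of
 * the connection set up: it stops the timers, releases congestion
 * control state, purges the write, out-of-order, DMA and prequeue
 * queues, drops any MD5 keys and TCP-cookie state, releases the bound
 * port and the cached sendmsg page, and finally decrements the global
 * socket count.
 */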
7d06b2e0 1909void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1910{
1911 struct tcp_sock *tp = tcp_sk(sk);
1912
1913 tcp_clear_xmit_timers(sk);
1914
6687e988 1915 tcp_cleanup_congestion_control(sk);
317a76f9 1916
1da177e4 1917 /* Clean up the write buffer. */
fe067e8a 1918 tcp_write_queue_purge(sk);
1da177e4
LT
1919
1920 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1921 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1922
cfb6eeb4
YH
1923#ifdef CONFIG_TCP_MD5SIG
1924 /* Clean up the MD5 key list, if any */
1925 if (tp->md5sig_info) {
1926 tcp_v4_clear_md5_list(sk);
1927 kfree(tp->md5sig_info);
1928 tp->md5sig_info = NULL;
1929 }
1930#endif
1931
1a2449a8
CL
1932#ifdef CONFIG_NET_DMA
1933 /* Cleans up our sk_async_wait_queue */
e905a9ed 1934 __skb_queue_purge(&sk->sk_async_wait_queue);
1a2449a8
CL
1935#endif
1936
1da177e4
LT
1937 /* Clean the prequeue; it really should be empty by now. */
1938 __skb_queue_purge(&tp->ucopy.prequeue);
1939
1940 /* Clean up a referenced TCP bind bucket. */
463c84b9 1941 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1942 inet_put_port(sk);
1da177e4
LT
1943
1944 /*
1945 * If a cached sendmsg page exists, toss it.
1946 */
1947 if (sk->sk_sndmsg_page) {
1948 __free_page(sk->sk_sndmsg_page);
1949 sk->sk_sndmsg_page = NULL;
1950 }
1951
435cf559
WAS
1952 /* TCP Cookie Transactions */
1953 if (tp->cookie_values != NULL) {
1954 kref_put(&tp->cookie_values->kref,
1955 tcp_cookie_values_release);
1956 tp->cookie_values = NULL;
1957 }
1958
1748376b 1959 percpu_counter_dec(&tcp_sockets_allocated);
1da177e4 1960}
1da177e4
LT
1961EXPORT_SYMBOL(tcp_v4_destroy_sock);
1962
1963#ifdef CONFIG_PROC_FS
1964/* Proc filesystem TCP sock list dumping. */
1965
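/*
 * The seq_file iterator below walks the listening hash first and the
 * established/time-wait hash second; tcp_iter_state remembers which
 * phase (state), bucket and in-bucket offset a partial read stopped at
 * so that a later read can resume without rescanning from the start.
 */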
3ab5aee7 1966static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1da177e4 1967{
3ab5aee7 1968 return hlist_nulls_empty(head) ? NULL :
8feaf0c0 1969 list_entry(head->first, struct inet_timewait_sock, tw_node);
1da177e4
LT
1970}
1971
8feaf0c0 1972static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4 1973{
3ab5aee7
ED
1974 return !is_a_nulls(tw->tw_node.next) ?
1975 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1da177e4
LT
1976}
1977
a8b690f9
TH
1978/*
1979 * Get the next listener socket following cur. If cur is NULL, get the first
1980 * socket starting from the bucket given in st->bucket; when st->bucket is zero the
1981 * very first socket in the hash table is returned.
1982 */
1da177e4
LT
1983static void *listening_get_next(struct seq_file *seq, void *cur)
1984{
463c84b9 1985 struct inet_connection_sock *icsk;
c25eb3bf 1986 struct hlist_nulls_node *node;
1da177e4 1987 struct sock *sk = cur;
5caea4ea 1988 struct inet_listen_hashbucket *ilb;
5799de0b 1989 struct tcp_iter_state *st = seq->private;
a4146b1b 1990 struct net *net = seq_file_net(seq);
1da177e4
LT
1991
1992 if (!sk) {
a8b690f9 1993 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 1994 spin_lock_bh(&ilb->lock);
c25eb3bf 1995 sk = sk_nulls_head(&ilb->head);
a8b690f9 1996 st->offset = 0;
1da177e4
LT
1997 goto get_sk;
1998 }
5caea4ea 1999 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 2000 ++st->num;
a8b690f9 2001 ++st->offset;
1da177e4
LT
2002
2003 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 2004 struct request_sock *req = cur;
1da177e4 2005
72a3effa 2006 icsk = inet_csk(st->syn_wait_sk);
1da177e4
LT
2007 req = req->dl_next;
2008 while (1) {
2009 while (req) {
bdccc4ca 2010 if (req->rsk_ops->family == st->family) {
1da177e4
LT
2011 cur = req;
2012 goto out;
2013 }
2014 req = req->dl_next;
2015 }
72a3effa 2016 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1da177e4
LT
2017 break;
2018get_req:
463c84b9 2019 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1da177e4 2020 }
1bde5ac4 2021 sk = sk_nulls_next(st->syn_wait_sk);
1da177e4 2022 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 2023 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 2024 } else {
e905a9ed 2025 icsk = inet_csk(sk);
463c84b9
ACM
2026 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2027 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 2028 goto start_req;
463c84b9 2029 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1bde5ac4 2030 sk = sk_nulls_next(sk);
1da177e4
LT
2031 }
2032get_sk:
c25eb3bf 2033 sk_nulls_for_each_from(sk, node) {
8475ef9f
PE
2034 if (!net_eq(sock_net(sk), net))
2035 continue;
2036 if (sk->sk_family == st->family) {
1da177e4
LT
2037 cur = sk;
2038 goto out;
2039 }
e905a9ed 2040 icsk = inet_csk(sk);
463c84b9
ACM
2041 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2042 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1da177e4
LT
2043start_req:
2044 st->uid = sock_i_uid(sk);
2045 st->syn_wait_sk = sk;
2046 st->state = TCP_SEQ_STATE_OPENREQ;
2047 st->sbucket = 0;
2048 goto get_req;
2049 }
463c84b9 2050 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 2051 }
5caea4ea 2052 spin_unlock_bh(&ilb->lock);
a8b690f9 2053 st->offset = 0;
0f7ff927 2054 if (++st->bucket < INET_LHTABLE_SIZE) {
5caea4ea
ED
2055 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2056 spin_lock_bh(&ilb->lock);
c25eb3bf 2057 sk = sk_nulls_head(&ilb->head);
1da177e4
LT
2058 goto get_sk;
2059 }
2060 cur = NULL;
2061out:
2062 return cur;
2063}
2064
2065static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2066{
a8b690f9
TH
2067 struct tcp_iter_state *st = seq->private;
2068 void *rc;
2069
2070 st->bucket = 0;
2071 st->offset = 0;
2072 rc = listening_get_next(seq, NULL);
1da177e4
LT
2073
2074 while (rc && *pos) {
2075 rc = listening_get_next(seq, rc);
2076 --*pos;
2077 }
2078 return rc;
2079}
2080
6eac5604
AK
2081static inline int empty_bucket(struct tcp_iter_state *st)
2082{
3ab5aee7
ED
2083 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2084 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
6eac5604
AK
2085}
2086
a8b690f9
TH
2087/*
2088 * Get first established socket starting from bucket given in st->bucket.
2089 * If st->bucket is zero, the very first socket in the hash is returned.
2090 */
1da177e4
LT
2091static void *established_get_first(struct seq_file *seq)
2092{
5799de0b 2093 struct tcp_iter_state *st = seq->private;
a4146b1b 2094 struct net *net = seq_file_net(seq);
1da177e4
LT
2095 void *rc = NULL;
2096
a8b690f9
TH
2097 st->offset = 0;
2098 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 2099 struct sock *sk;
3ab5aee7 2100 struct hlist_nulls_node *node;
8feaf0c0 2101 struct inet_timewait_sock *tw;
9db66bdc 2102 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 2103
6eac5604
AK
2104 /* Lockless fast path for the common case of empty buckets */
2105 if (empty_bucket(st))
2106 continue;
2107
9db66bdc 2108 spin_lock_bh(lock);
3ab5aee7 2109 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 2110 if (sk->sk_family != st->family ||
878628fb 2111 !net_eq(sock_net(sk), net)) {
1da177e4
LT
2112 continue;
2113 }
2114 rc = sk;
2115 goto out;
2116 }
2117 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 2118 inet_twsk_for_each(tw, node,
dbca9b27 2119 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 2120 if (tw->tw_family != st->family ||
878628fb 2121 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
2122 continue;
2123 }
2124 rc = tw;
2125 goto out;
2126 }
9db66bdc 2127 spin_unlock_bh(lock);
1da177e4
LT
2128 st->state = TCP_SEQ_STATE_ESTABLISHED;
2129 }
2130out:
2131 return rc;
2132}
2133
2134static void *established_get_next(struct seq_file *seq, void *cur)
2135{
2136 struct sock *sk = cur;
8feaf0c0 2137 struct inet_timewait_sock *tw;
3ab5aee7 2138 struct hlist_nulls_node *node;
5799de0b 2139 struct tcp_iter_state *st = seq->private;
a4146b1b 2140 struct net *net = seq_file_net(seq);
1da177e4
LT
2141
2142 ++st->num;
a8b690f9 2143 ++st->offset;
1da177e4
LT
2144
2145 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2146 tw = cur;
2147 tw = tw_next(tw);
2148get_tw:
878628fb 2149 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
1da177e4
LT
2150 tw = tw_next(tw);
2151 }
2152 if (tw) {
2153 cur = tw;
2154 goto out;
2155 }
9db66bdc 2156 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2157 st->state = TCP_SEQ_STATE_ESTABLISHED;
2158
6eac5604 2159 /* Look for the next non-empty bucket */
a8b690f9 2160 st->offset = 0;
f373b53b 2161 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
6eac5604
AK
2162 empty_bucket(st))
2163 ;
f373b53b 2164 if (st->bucket > tcp_hashinfo.ehash_mask)
6eac5604
AK
2165 return NULL;
2166
9db66bdc 2167 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
3ab5aee7 2168 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4 2169 } else
3ab5aee7 2170 sk = sk_nulls_next(sk);
1da177e4 2171
3ab5aee7 2172 sk_nulls_for_each_from(sk, node) {
878628fb 2173 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1da177e4
LT
2174 goto found;
2175 }
2176
2177 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2178 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
1da177e4
LT
2179 goto get_tw;
2180found:
2181 cur = sk;
2182out:
2183 return cur;
2184}
2185
2186static void *established_get_idx(struct seq_file *seq, loff_t pos)
2187{
a8b690f9
TH
2188 struct tcp_iter_state *st = seq->private;
2189 void *rc;
2190
2191 st->bucket = 0;
2192 rc = established_get_first(seq);
1da177e4
LT
2193
2194 while (rc && pos) {
2195 rc = established_get_next(seq, rc);
2196 --pos;
7174259e 2197 }
1da177e4
LT
2198 return rc;
2199}
2200
2201static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2202{
2203 void *rc;
5799de0b 2204 struct tcp_iter_state *st = seq->private;
1da177e4 2205
1da177e4
LT
2206 st->state = TCP_SEQ_STATE_LISTENING;
2207 rc = listening_get_idx(seq, &pos);
2208
2209 if (!rc) {
1da177e4
LT
2210 st->state = TCP_SEQ_STATE_ESTABLISHED;
2211 rc = established_get_idx(seq, pos);
2212 }
2213
2214 return rc;
2215}
2216
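/*
 * tcp_seek_last_pos() resumes a walk at the bucket/offset recorded by
 * the previous read.  st->num is restored afterwards so the running
 * entry counter printed in the "sl" column stays continuous across
 * reads.
 */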
a8b690f9
TH
2217static void *tcp_seek_last_pos(struct seq_file *seq)
2218{
2219 struct tcp_iter_state *st = seq->private;
2220 int offset = st->offset;
2221 int orig_num = st->num;
2222 void *rc = NULL;
2223
2224 switch (st->state) {
2225 case TCP_SEQ_STATE_OPENREQ:
2226 case TCP_SEQ_STATE_LISTENING:
2227 if (st->bucket >= INET_LHTABLE_SIZE)
2228 break;
2229 st->state = TCP_SEQ_STATE_LISTENING;
2230 rc = listening_get_next(seq, NULL);
2231 while (offset-- && rc)
2232 rc = listening_get_next(seq, rc);
2233 if (rc)
2234 break;
2235 st->bucket = 0;
2236 /* Fallthrough */
2237 case TCP_SEQ_STATE_ESTABLISHED:
2238 case TCP_SEQ_STATE_TIME_WAIT:
2239 st->state = TCP_SEQ_STATE_ESTABLISHED;
2240 if (st->bucket > tcp_hashinfo.ehash_mask)
2241 break;
2242 rc = established_get_first(seq);
2243 while (offset-- && rc)
2244 rc = established_get_next(seq, rc);
2245 }
2246
2247 st->num = orig_num;
2248
2249 return rc;
2250}
2251
1da177e4
LT
2252static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2253{
5799de0b 2254 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2255 void *rc;
2256
2257 if (*pos && *pos == st->last_pos) {
2258 rc = tcp_seek_last_pos(seq);
2259 if (rc)
2260 goto out;
2261 }
2262
1da177e4
LT
2263 st->state = TCP_SEQ_STATE_LISTENING;
2264 st->num = 0;
a8b690f9
TH
2265 st->bucket = 0;
2266 st->offset = 0;
2267 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2268
2269out:
2270 st->last_pos = *pos;
2271 return rc;
1da177e4
LT
2272}
2273
2274static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2275{
a8b690f9 2276 struct tcp_iter_state *st = seq->private;
1da177e4 2277 void *rc = NULL;
1da177e4
LT
2278
2279 if (v == SEQ_START_TOKEN) {
2280 rc = tcp_get_idx(seq, 0);
2281 goto out;
2282 }
1da177e4
LT
2283
2284 switch (st->state) {
2285 case TCP_SEQ_STATE_OPENREQ:
2286 case TCP_SEQ_STATE_LISTENING:
2287 rc = listening_get_next(seq, v);
2288 if (!rc) {
1da177e4 2289 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2290 st->bucket = 0;
2291 st->offset = 0;
1da177e4
LT
2292 rc = established_get_first(seq);
2293 }
2294 break;
2295 case TCP_SEQ_STATE_ESTABLISHED:
2296 case TCP_SEQ_STATE_TIME_WAIT:
2297 rc = established_get_next(seq, v);
2298 break;
2299 }
2300out:
2301 ++*pos;
a8b690f9 2302 st->last_pos = *pos;
1da177e4
LT
2303 return rc;
2304}
2305
2306static void tcp_seq_stop(struct seq_file *seq, void *v)
2307{
5799de0b 2308 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2309
2310 switch (st->state) {
2311 case TCP_SEQ_STATE_OPENREQ:
2312 if (v) {
463c84b9
ACM
2313 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2314 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2315 }
2316 case TCP_SEQ_STATE_LISTENING:
2317 if (v != SEQ_START_TOKEN)
5caea4ea 2318 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4
LT
2319 break;
2320 case TCP_SEQ_STATE_TIME_WAIT:
2321 case TCP_SEQ_STATE_ESTABLISHED:
2322 if (v)
9db66bdc 2323 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2324 break;
2325 }
2326}
2327
2328static int tcp_seq_open(struct inode *inode, struct file *file)
2329{
2330 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2331 struct tcp_iter_state *s;
52d6f3f1 2332 int err;
1da177e4 2333
52d6f3f1
DL
2334 err = seq_open_net(inode, file, &afinfo->seq_ops,
2335 sizeof(struct tcp_iter_state));
2336 if (err < 0)
2337 return err;
f40c8174 2338
52d6f3f1 2339 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2340 s->family = afinfo->family;
a8b690f9 2341 s->last_pos = 0;
f40c8174
DL
2342 return 0;
2343}
2344
6f8b13bc 2345int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2346{
2347 int rc = 0;
2348 struct proc_dir_entry *p;
2349
68fcadd1
DL
2350 afinfo->seq_fops.open = tcp_seq_open;
2351 afinfo->seq_fops.read = seq_read;
2352 afinfo->seq_fops.llseek = seq_lseek;
2353 afinfo->seq_fops.release = seq_release_net;
7174259e 2354
9427c4b3
DL
2355 afinfo->seq_ops.start = tcp_seq_start;
2356 afinfo->seq_ops.next = tcp_seq_next;
2357 afinfo->seq_ops.stop = tcp_seq_stop;
2358
84841c3c
DL
2359 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2360 &afinfo->seq_fops, afinfo);
2361 if (!p)
1da177e4
LT
2362 rc = -ENOMEM;
2363 return rc;
2364}
4bc2f18b 2365EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2366
6f8b13bc 2367void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2368{
6f8b13bc 2369 proc_net_remove(net, afinfo->name);
1da177e4 2370}
4bc2f18b 2371EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2372
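/*
 * The three helpers below each format one /proc/net/tcp record:
 * get_openreq4() for embryonic SYN_RECV requests, get_tcp4_sock() for
 * full sockets, and get_timewait4_sock() for TIME_WAIT minisockets.
 */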
60236fdd 2373static void get_openreq4(struct sock *sk, struct request_sock *req,
5e659e4c 2374 struct seq_file *f, int i, int uid, int *len)
1da177e4 2375{
2e6599cb 2376 const struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
2377 int ttd = req->expires - jiffies;
2378
5e659e4c 2379 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
71338aa7 2380 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
1da177e4 2381 i,
2e6599cb 2382 ireq->loc_addr,
c720c7e8 2383 ntohs(inet_sk(sk)->inet_sport),
2e6599cb
ACM
2384 ireq->rmt_addr,
2385 ntohs(ireq->rmt_port),
1da177e4
LT
2386 TCP_SYN_RECV,
2387 0, 0, /* could print option size, but that is af dependent. */
2388 1, /* timers active (only the expire timer) */
2389 jiffies_to_clock_t(ttd),
2390 req->retrans,
2391 uid,
2392 0, /* non-standard timer */
2393 0, /* open_requests have no inode */
2394 atomic_read(&sk->sk_refcnt),
5e659e4c
PE
2395 req,
2396 len);
1da177e4
LT
2397}
2398
5e659e4c 2399static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
1da177e4
LT
2400{
2401 int timer_active;
2402 unsigned long timer_expires;
cf4c6bf8
IJ
2403 struct tcp_sock *tp = tcp_sk(sk);
2404 const struct inet_connection_sock *icsk = inet_csk(sk);
2405 struct inet_sock *inet = inet_sk(sk);
c720c7e8
ED
2406 __be32 dest = inet->inet_daddr;
2407 __be32 src = inet->inet_rcv_saddr;
2408 __u16 destp = ntohs(inet->inet_dport);
2409 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2410 int rx_queue;
1da177e4 2411
463c84b9 2412 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2413 timer_active = 1;
463c84b9
ACM
2414 timer_expires = icsk->icsk_timeout;
2415 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2416 timer_active = 4;
463c84b9 2417 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2418 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2419 timer_active = 2;
cf4c6bf8 2420 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2421 } else {
2422 timer_active = 0;
2423 timer_expires = jiffies;
2424 }
2425
49d09007
ED
2426 if (sk->sk_state == TCP_LISTEN)
2427 rx_queue = sk->sk_ack_backlog;
2428 else
2429 /*
2430 * Because we don't lock the socket, we might find a transient negative value.
2431 */
2432 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2433
5e659e4c 2434 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
71338aa7 2435 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
cf4c6bf8 2436 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2437 tp->write_seq - tp->snd_una,
49d09007 2438 rx_queue,
1da177e4
LT
2439 timer_active,
2440 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2441 icsk->icsk_retransmits,
cf4c6bf8 2442 sock_i_uid(sk),
6687e988 2443 icsk->icsk_probes_out,
cf4c6bf8
IJ
2444 sock_i_ino(sk),
2445 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2446 jiffies_to_clock_t(icsk->icsk_rto),
2447 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2448 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2449 tp->snd_cwnd,
0b6a05c1 2450 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
5e659e4c 2451 len);
1da177e4
LT
2452}
2453
7174259e 2454static void get_timewait4_sock(struct inet_timewait_sock *tw,
5e659e4c 2455 struct seq_file *f, int i, int *len)
1da177e4 2456{
23f33c2d 2457 __be32 dest, src;
1da177e4
LT
2458 __u16 destp, srcp;
2459 int ttd = tw->tw_ttd - jiffies;
2460
2461 if (ttd < 0)
2462 ttd = 0;
2463
2464 dest = tw->tw_daddr;
2465 src = tw->tw_rcv_saddr;
2466 destp = ntohs(tw->tw_dport);
2467 srcp = ntohs(tw->tw_sport);
2468
5e659e4c 2469 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
71338aa7 2470 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
1da177e4
LT
2471 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2472 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
5e659e4c 2473 atomic_read(&tw->tw_refcnt), tw, len);
1da177e4
LT
2474}
2475
2476#define TMPSZ 150
2477
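/*
 * Illustrative only: a /proc/net/tcp record produced by the code below
 * looks roughly like this (addresses and ports are hex; the values here
 * are made up):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *    0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 ...
 *
 * The trailing seq_printf() below pads each record out to TMPSZ - 1
 * characters so every line has a fixed width.
 */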
2478static int tcp4_seq_show(struct seq_file *seq, void *v)
2479{
5799de0b 2480 struct tcp_iter_state *st;
5e659e4c 2481 int len;
1da177e4
LT
2482
2483 if (v == SEQ_START_TOKEN) {
2484 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2485 " sl local_address rem_address st tx_queue "
2486 "rx_queue tr tm->when retrnsmt uid timeout "
2487 "inode");
2488 goto out;
2489 }
2490 st = seq->private;
2491
2492 switch (st->state) {
2493 case TCP_SEQ_STATE_LISTENING:
2494 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2495 get_tcp4_sock(v, seq, st->num, &len);
1da177e4
LT
2496 break;
2497 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2498 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
1da177e4
LT
2499 break;
2500 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2501 get_timewait4_sock(v, seq, st->num, &len);
1da177e4
LT
2502 break;
2503 }
5e659e4c 2504 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
1da177e4
LT
2505out:
2506 return 0;
2507}
2508
1da177e4 2509static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2510 .name = "tcp",
2511 .family = AF_INET,
5f4472c5
DL
2512 .seq_fops = {
2513 .owner = THIS_MODULE,
2514 },
9427c4b3
DL
2515 .seq_ops = {
2516 .show = tcp4_seq_show,
2517 },
1da177e4
LT
2518};
2519
2c8c1e72 2520static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2521{
2522 return tcp_proc_register(net, &tcp4_seq_afinfo);
2523}
2524
2c8c1e72 2525static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2526{
2527 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2528}
2529
2530static struct pernet_operations tcp4_net_ops = {
2531 .init = tcp4_proc_init_net,
2532 .exit = tcp4_proc_exit_net,
2533};
2534
1da177e4
LT
2535int __init tcp4_proc_init(void)
2536{
757764f6 2537 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2538}
2539
2540void tcp4_proc_exit(void)
2541{
757764f6 2542 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2543}
2544#endif /* CONFIG_PROC_FS */
2545
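/*
 * GRO hooks: tcp4_gro_receive() only lets segments be merged once the
 * pseudo-header checksum has been validated (or shown to be
 * unnecessary), and tcp4_gro_complete() seeds th->check with the
 * pseudo-header checksum and marks the merged skb as SKB_GSO_TCPV4
 * before handing it up the stack.
 */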
bf296b12
HX
2546struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2547{
b71d1d42 2548 const struct iphdr *iph = skb_gro_network_header(skb);
bf296b12
HX
2549
2550 switch (skb->ip_summed) {
2551 case CHECKSUM_COMPLETE:
86911732 2552 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
bf296b12
HX
2553 skb->csum)) {
2554 skb->ip_summed = CHECKSUM_UNNECESSARY;
2555 break;
2556 }
2557
2558 /* fall through */
2559 case CHECKSUM_NONE:
2560 NAPI_GRO_CB(skb)->flush = 1;
2561 return NULL;
2562 }
2563
2564 return tcp_gro_receive(head, skb);
2565}
bf296b12
HX
2566
2567int tcp4_gro_complete(struct sk_buff *skb)
2568{
b71d1d42 2569 const struct iphdr *iph = ip_hdr(skb);
bf296b12
HX
2570 struct tcphdr *th = tcp_hdr(skb);
2571
2572 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2573 iph->saddr, iph->daddr, 0);
2574 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2575
2576 return tcp_gro_complete(skb);
2577}
bf296b12 2578
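/*
 * tcp_prot is the struct proto instance the IPv4 socket layer dispatches
 * through for SOCK_STREAM/IPPROTO_TCP sockets; each entry points at the
 * TCP implementation of the corresponding socket operation, and the
 * sysctl/memory fields expose TCP's limits to the core socket code.
 */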
1da177e4
LT
2579struct proto tcp_prot = {
2580 .name = "TCP",
2581 .owner = THIS_MODULE,
2582 .close = tcp_close,
2583 .connect = tcp_v4_connect,
2584 .disconnect = tcp_disconnect,
463c84b9 2585 .accept = inet_csk_accept,
1da177e4
LT
2586 .ioctl = tcp_ioctl,
2587 .init = tcp_v4_init_sock,
2588 .destroy = tcp_v4_destroy_sock,
2589 .shutdown = tcp_shutdown,
2590 .setsockopt = tcp_setsockopt,
2591 .getsockopt = tcp_getsockopt,
1da177e4 2592 .recvmsg = tcp_recvmsg,
7ba42910
CG
2593 .sendmsg = tcp_sendmsg,
2594 .sendpage = tcp_sendpage,
1da177e4 2595 .backlog_rcv = tcp_v4_do_rcv,
ab1e0a13
ACM
2596 .hash = inet_hash,
2597 .unhash = inet_unhash,
2598 .get_port = inet_csk_get_port,
1da177e4
LT
2599 .enter_memory_pressure = tcp_enter_memory_pressure,
2600 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2601 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2602 .memory_allocated = &tcp_memory_allocated,
2603 .memory_pressure = &tcp_memory_pressure,
2604 .sysctl_mem = sysctl_tcp_mem,
2605 .sysctl_wmem = sysctl_tcp_wmem,
2606 .sysctl_rmem = sysctl_tcp_rmem,
2607 .max_header = MAX_TCP_HEADER,
2608 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2609 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2610 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2611 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2612 .h.hashinfo = &tcp_hashinfo,
7ba42910 2613 .no_autobind = true,
543d9cfe
ACM
2614#ifdef CONFIG_COMPAT
2615 .compat_setsockopt = compat_tcp_setsockopt,
2616 .compat_getsockopt = compat_tcp_getsockopt,
2617#endif
1da177e4 2618};
4bc2f18b 2619EXPORT_SYMBOL(tcp_prot);
1da177e4 2620
046ee902
DL
2621
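/*
 * Per-namespace setup: each struct net gets its own kernel-internal TCP
 * control socket (net->ipv4.tcp_sock), used when the stack must emit
 * segments such as RSTs on behalf of no local socket; the batch exit
 * hook purges that namespace's TIME_WAIT entries on teardown.
 */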
2622static int __net_init tcp_sk_init(struct net *net)
2623{
2624 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2625 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2626}
2627
2628static void __net_exit tcp_sk_exit(struct net *net)
2629{
2630 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
b099ce26
EB
2631}
2632
2633static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2634{
2635 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2636}
2637
2638static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2639 .init = tcp_sk_init,
2640 .exit = tcp_sk_exit,
2641 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2642};
2643
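/*
 * tcp_v4_init() runs once at boot: it initialises the global connection
 * hash tables and registers the per-namespace operations above; failure
 * is fatal because the stack cannot run without its TCP control socket.
 */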
9b0f976f 2644void __init tcp_v4_init(void)
1da177e4 2645{
5caea4ea 2646 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2647 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2648 panic("Failed to create the TCP control socket.\n");
1da177e4 2649}