/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
33 | ||
0bbf87d8 | 34 | void inet_get_local_port_range(struct net *net, int *low, int *high) |
227b60f5 | 35 | { |
95c96174 ED |
36 | unsigned int seq; |
37 | ||
227b60f5 | 38 | do { |
c9d8f1a6 | 39 | seq = read_seqbegin(&net->ipv4.ip_local_ports.lock); |
227b60f5 | 40 | |
c9d8f1a6 CW |
41 | *low = net->ipv4.ip_local_ports.range[0]; |
42 | *high = net->ipv4.ip_local_ports.range[1]; | |
43 | } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq)); | |
227b60f5 SH |
44 | } |
45 | EXPORT_SYMBOL(inet_get_local_port_range); | |
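
/* The read_seqbegin/read_seqretry loop above is the standard lockless
 * seqlock reader: writers update ip_local_ports.range under the lock,
 * readers simply retry until the sequence is stable, so *low and *high
 * are always returned as a consistent pair. This file itself calls it
 * from the ephemeral-port scan in inet_csk_get_port():
 *
 *	inet_get_local_port_range(net, &low, &high);
 *	high++;		(made exclusive there; see that function)
 */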

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     rcu_access_pointer(sk->sk_reuseport_cb) ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
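
/* In short: two sockets may share an {address, port} pair only if every
 * socket already on the bucket agreed to it. SO_REUSEADDR (sk_reuse)
 * permits sharing unless an active listener is involved; SO_REUSEPORT
 * (sk_reuseport) additionally demands a matching effective uid, so one
 * user cannot attach to another user's port. With relax == false, a
 * bind that is merely *able* to conflict later (both ends SO_REUSEADDR
 * but nobody listening yet) is reported as a conflict too.
 */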

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 * We try to allocate an odd port (and leave even ports for connect()).
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int ret = 1, attempts = 5, port = snum;
	int smallest_size = -1, smallest_port;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	int i, low, high, attempt_half;
	struct inet_bind_bucket *tb;
	kuid_t uid = sock_i_uid(sk);
	u32 remaining, offset;

	if (port) {
have_port:
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == port)
				goto tb_found;

		goto tb_not_found;
	}
again:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_get_local_port_range(net, &low, &high);
	high++;	/* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = prandom_u32() % remaining;
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;
	smallest_size = -1;
	smallest_port = low; /* avoid compiler warning */

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (((tb->fastreuse > 0 && reuse) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport &&
				      !rcu_access_pointer(sk->sk_reuseport_cb) &&
				      uid_eq(tb->fastuid, uid))) &&
				    (tb->num_owners < smallest_size || smallest_size == -1)) {
					smallest_size = tb->num_owners;
					smallest_port = port;
				}
				if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false))
					goto tb_found;
				goto next_port;
			}
		goto tb_not_found;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	if (smallest_size != -1) {
		port = smallest_port;
		goto have_port;
	}
	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}
	return ret;

tb_not_found:
	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
				     net, head, port);
	if (!tb)
		goto fail_unlock;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 && reuse) ||
		     (tb->fastreuseport > 0 &&
		      !rcu_access_pointer(sk->sk_reuseport_cb) &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1)
			goto success;
		if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
			if ((reuse ||
			     (tb->fastreuseport > 0 &&
			      sk->sk_reuseport &&
			      !rcu_access_pointer(sk->sk_reuseport_cb) &&
			      uid_eq(tb->fastuid, uid))) &&
			    smallest_size != -1 && --attempts >= 0) {
				spin_unlock_bh(&head->lock);
				goto again;
			}
			goto fail_unlock;
		}
		if (!reuse)
			tb->fastreuse = 0;
		if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))
			tb->fastreuseport = 0;
	} else {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else {
			tb->fastreuseport = 0;
		}
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
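
/* Worked example of the parity split above, with the default range
 * [32768, 60999]: remaining is trimmed to an even count and offset is
 * forced odd, so the scan visits 32768 + odd, stepping by two - odd
 * ports only. __inet_hash_connect() prefers the even ports, keeping
 * autobind and connect() port allocation mostly out of each other's
 * way; only after a failed odd scan does offset-- switch parity.
 */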

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}
out:
	release_sock(sk);
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
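
/* Note the double check of tfo_listener in the accept path above: the
 * first, lockless test only decides whether taking fastopenq.lock is
 * worth it; the decision to leave req alive for reqsk_fastopen_remove()
 * is re-validated under the lock - a conventional double-checked
 * locking pattern.
 */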

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
		    (unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
		    (unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
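
/* Usage sketch: a transport wires its handlers in once at socket
 * initialization. TCP does so from tcp_init_xmit_timers() in
 * net/ipv4/tcp_timer.c, roughly:
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
 *				  &tcp_keepalive_timer);
 *
 * (handler names from the TCP sources; see the file header).
 */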

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt = ireq->opt;
	struct rtable *rt;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
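
/* The two routing helpers above differ mainly in where the IP options
 * come from: inet_csk_route_req() reads ireq->opt, which the request
 * socket owns outright, while inet_csk_route_child_sock() dereferences
 * the child's RCU-managed newinet->inet_opt and must therefore hold
 * rcu_read_lock() across both the lookup and every use of @opt.
 */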

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
			       struct request_sock *req)
{
	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
	bool found = false;

	if (sk_hashed(req_to_sk(req))) {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(unsigned long data)
{
	struct request_sock *req = (struct request_sock *)data;
	struct sock *sk_listener = req->rsk_listener;
	struct net *net = sock_net(sk_listener);
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (sk_state_load(sk_listener) != TCP_LISTEN)
		goto drop;

	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) within the first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
		return;
	}
drop:
	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
}
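
/* Worked example of the pruning heuristic above (illustrative numbers):
 * with sk_max_ack_backlog = 64 and qlen = 64, the queue is more than
 * half full, so the loop runs. Starting from thresh = 5 with 8 young
 * requests (young = 16 after the shift): 64 >= 16, 64 >= 32 and
 * 64 >= 64 each cost one tick, leaving thresh = 2 - old, never-acked
 * requests expire much sooner, while young ones keep their allowance.
 */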

static void reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);

	inet_ehash_insert(req_to_sk(req), NULL);
	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	atomic_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
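
/* On the "2 + 1" refcount above: the request is now reachable from the
 * ehash chain and armed in rsk_timer (the "2"), while the "+ 1" appears
 * to account for the reference the creating caller still holds until it
 * is done sending the SYNACK. Writing the sum out keeps that split
 * visible.
 */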

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
		newsk->sk_write_space = sk_stream_write_space;

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	local_bh_disable();
	percpu_counter_dec(sk->sk_prot->orphan_count);
	local_bh_enable();
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function lets us force the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, int backlog)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = -EADDRINUSE;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk_state_store(sk, TCP_LISTEN);
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	sk->sk_state = TCP_CLOSE;
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
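
/* Call-path sketch (the syscall plumbing lives outside this file):
 * listen(fd, backlog) lands in inet_listen(), which for a socket that
 * is not yet listening ends up doing roughly
 *
 *	err = inet_csk_listen_start(sk, backlog);
 *
 * so get_port() re-validates the earlier bind() and sk_prot->hash()
 * publishes the listener only after the TCP_LISTEN transition above.
 */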

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		tcp_sk(child)->fastopen_rsk = NULL;
	}
	inet_csk_destroy_sock(child);
	reqsk_put(req);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_head = req;
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(sk, req);
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		if (inet_csk_reqsk_queue_add(sk, req, child))
			return child;
	}
	/* Too bad, another child took ownership of the request, undo. */
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		inet_child_forget(sk, req, child);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}
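
/* Note on the final return above: dst is the first member of struct
 * rtable, so once a failed lookup has reset rt to NULL, &rt->dst is
 * NULL too and callers only need to test the returned pointer.
 */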

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);