net: add real socket cookies
net/ipv4/inet_timewait_sock.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT sockets functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 * inet_twsk_unhash - unhash a timewait socket from established hash
 * @tw: timewait socket
 *
 * Unhash a timewait socket from the established hash, if hashed.
 * The ehash lock must be held by the caller.
 * Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
        if (hlist_nulls_unhashed(&tw->tw_node))
                return 0;

        hlist_nulls_del_rcu(&tw->tw_node);
        sk_nulls_node_init(&tw->tw_node);
        /*
         * We cannot call inet_twsk_put() ourselves under the lock,
         * the caller must call it for us.
         */
        return 1;
}

/**
 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * Unhash a timewait socket from the bind hash, if hashed.
 * The bind hash lock must be held by the caller.
 * Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
{
        struct inet_bind_bucket *tb = tw->tw_tb;

        if (!tb)
                return 0;

        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        /*
         * We cannot call inet_twsk_put() ourselves under the lock,
         * the caller must call it for us.
         */
        return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
                             struct inet_hashinfo *hashinfo)
{
        struct inet_bind_hashbucket *bhead;
        int refcnt;
        /* Unlink from established hashes. */
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

        spin_lock(lock);
        refcnt = inet_twsk_unhash(tw);
        spin_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];

        spin_lock(&bhead->lock);
        refcnt += inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);

        BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
        atomic_sub(refcnt, &tw->tw_refcnt);
}

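/* Final teardown of a timewait socket: run the protocol destructor, drop
 * the netns reference and return the object to the per-protocol slab.
 * Called once the last reference is gone (see inet_twsk_put() below).
 */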
void inet_twsk_free(struct inet_timewait_sock *tw)
{
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
        pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
        release_net(twsk_net(tw));
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
}

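/* Drop one reference; free the timewait socket when the count reaches zero. */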
void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (atomic_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

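/* Small helpers to link the timewait socket on the established (RCU nulls)
 * hash chain and on the bind-hash owners list.
 */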
static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
                                   struct hlist_nulls_head *list)
{
        hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
                                    struct hlist_head *list)
{
        hlist_add_head(&tw->tw_bind_node, list);
}

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
           Note that any socket with inet->inet_num != 0 MUST be bound in
           the binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        spin_lock(lock);

        /*
         * Step 2: Hash TW into the tcp ehash chain.
         * Notes:
         * - tw_refcnt is set to 3 because:
         *   - we have one reference from the bhash chain,
         *   - we have one reference from the ehash chain,
         *   - the caller keeps the third reference and is expected to
         *     drop it with inet_twsk_put() when it is done with tw.
         * - We can use atomic_set() because the prior spin_lock()/spin_unlock()
         *   committed all tw fields into memory.
         */
        atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
        inet_twsk_add_node_rcu(tw, &ehead->chain);

        /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

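/* Allocate a timewait socket from the protocol's twsk slab and copy over the
 * identity of @sk (addresses, ports, family, hash, socket cookie, netns).
 * tw_refcnt deliberately starts at 0: RCU lookups must not observe a live
 * refcount before the object is fully initialised.
 *
 * Illustrative lifecycle as driven by a typical caller such as the TCP
 * timewait code (sketch only, not a definition of the API contract):
 *
 *      tw = inet_twsk_alloc(sk, substate);
 *      __inet_twsk_hashdance(tw, sk, hashinfo);        // refcnt becomes 3
 *      inet_twsk_schedule(tw, twdr, timeo, timewait_len);
 *      inet_twsk_put(tw);                              // drop the caller's ref
 */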
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
        if (tw != NULL) {
                const struct inet_sock *inet = inet_sk(sk);

                kmemcheck_annotate_bitfield(tw, flags);

                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->inet_sport;
                tw->tw_dport        = inet->inet_dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
                atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
                twsk_net_set(tw, hold_net(sock_net(sk)));
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non-zero value before everything is set up for this
                 * timewait socket.
                 */
                atomic_set(&tw->tw_refcnt, 0);
                inet_twsk_dead_node_init(tw);
                __module_get(tw->tw_prot->owner);
        }

        return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                    const int slot)
{
        struct inet_timewait_sock *tw;
        unsigned int killed;
        int ret;

        /* NOTE: compare this to the previous version where the lock
         * was released after detaching the chain. It was racy,
         * because tw buckets are scheduled in a non-serialized context
         * in 2.3 (with netfilter), and with softnet it is common, because
         * soft irqs are not sequenced.
         */
        killed = 0;
        ret = 0;
rescan:
        inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
                __inet_twsk_del_dead_node(tw);
                spin_unlock(&twdr->death_lock);
                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
                inet_twsk_put(tw);
                killed++;
                spin_lock(&twdr->death_lock);
                if (killed > INET_TWDR_TWKILL_QUOTA) {
                        ret = 1;
                        break;
                }

                /* While we dropped twdr->death_lock, another cpu may have
                 * killed off the next TW bucket in the list, therefore
                 * do a fresh re-read of the hlist head node with the
                 * lock reacquired.  We still use the hlist traversal
                 * macro in order to get the prefetches.
                 */
                goto rescan;
        }

        twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
        return ret;
}

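/* Slow timer handler for the death row: reap the current slot, and if the
 * per-run quota was exceeded, defer the remainder to the twkill workqueue.
 */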
void inet_twdr_hangman(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        unsigned int need_timer;

        twdr = (struct inet_timewait_death_row *)data;
        spin_lock(&twdr->death_lock);

        if (twdr->tw_count == 0)
                goto out;

        need_timer = 0;
        if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
                twdr->thread_slots |= (1 << twdr->slot);
                schedule_work(&twdr->twkill_work);
                need_timer = 1;
        } else {
                /* We purged the entire slot, anything left? */
                if (twdr->tw_count)
                        need_timer = 1;
                twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
        }
        if (need_timer)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);

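/* Workqueue side of the slow timer: keep reaping the slots that
 * inet_twdr_hangman() marked in thread_slots, yielding the CPU between
 * quota-sized batches.
 */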
void inet_twdr_twkill_work(struct work_struct *work)
{
        struct inet_timewait_death_row *twdr =
                container_of(work, struct inet_timewait_death_row, twkill_work);
        int i;

        BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
                     (sizeof(twdr->thread_slots) * 8));

        while (twdr->thread_slots) {
                spin_lock_bh(&twdr->death_lock);
                for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
                        if (!(twdr->thread_slots & (1 << i)))
                                continue;

                        while (inet_twdr_do_twkill_work(twdr, i) != 0) {
                                if (need_resched()) {
                                        spin_unlock_bh(&twdr->death_lock);
                                        schedule();
                                        spin_lock_bh(&twdr->death_lock);
                                }
                        }

                        twdr->thread_slots &= ~(1 << i);
                }
                spin_unlock_bh(&twdr->death_lock);
        }
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
                          struct inet_timewait_death_row *twdr)
{
        spin_lock(&twdr->death_lock);
        if (inet_twsk_del_dead_node(tw)) {
                inet_twsk_put(tw);
                if (--twdr->tw_count == 0)
                        del_timer(&twdr->tw_timer);
        }
        spin_unlock(&twdr->death_lock);
        __inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

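/* Arm (or re-arm) the TIME_WAIT timeout for @tw: short timeouts go on the
 * fine-grained recycle wheel driven by twcal_timer, long ones on the coarse
 * slots driven by tw_timer.  Takes a reference unless the socket was already
 * on the death row.
 */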
void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        struct inet_timewait_death_row *twdr,
                        const int timeo, const int timewait_len)
{
        struct hlist_head *list;
        int slot;

        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (probability of such event
         * is p^(N+1), where p is probability to lose single packet and
         * time to detect the loss is about RTO*(2^N - 1) with exponential
         * backoff). Normal timewait length is calculated so that we
         * wait at least for one retransmitted FIN (maximal RTO is 120sec).
         * [ BTW Linux, following BSD, violates this requirement by waiting
         *   only 60sec; we should wait at least 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if peer understands PAWS, we
         * kill tw bucket after 3.5*RTO (it is important that this number
         * is greater than TS tick!) and detect old duplicates with help
         * of PAWS.
         */
        slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

        spin_lock(&twdr->death_lock);

        /* Unlink it, if it was scheduled */
        if (inet_twsk_del_dead_node(tw))
                twdr->tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);

        if (slot >= INET_TWDR_RECYCLE_SLOTS) {
                /* Schedule to slow timer */
                if (timeo >= timewait_len) {
                        slot = INET_TWDR_TWKILL_SLOTS - 1;
                } else {
                        slot = DIV_ROUND_UP(timeo, twdr->period);
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
                tw->tw_ttd = inet_tw_time_stamp() + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
                tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);

                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
                        twdr->twcal_jiffie = jiffies;
                        twdr->twcal_timer.expires = twdr->twcal_jiffie +
                                        (slot << INET_TWDR_RECYCLE_TICK);
                        add_timer(&twdr->twcal_timer);
                } else {
                        if (time_after(twdr->twcal_timer.expires,
                                       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
                                mod_timer(&twdr->twcal_timer,
                                          jiffies + (slot << INET_TWDR_RECYCLE_TICK));
                        slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
                }
                list = &twdr->twcal_row[slot];
        }

        hlist_add_head(&tw->tw_death_node, list);

        if (twdr->tw_count++ == 0)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

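/* Timer handler for the fine-grained recycle wheel: walk the twcal_row slots
 * whose deadline has passed, kill the sockets found there, then advance (or
 * park) the wheel hand.
 */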
void inet_twdr_twcal_tick(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        int n, slot;
        unsigned long j;
        unsigned long now = jiffies;
        int killed = 0;
        int adv = 0;

        twdr = (struct inet_timewait_death_row *)data;

        spin_lock(&twdr->death_lock);
        if (twdr->twcal_hand < 0)
                goto out;

        slot = twdr->twcal_hand;
        j = twdr->twcal_jiffie;

        for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *safe;
                        struct inet_timewait_sock *tw;

                        inet_twsk_for_each_inmate_safe(tw, safe,
                                                       &twdr->twcal_row[slot]) {
                                __inet_twsk_del_dead_node(tw);
                                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
                                inet_twsk_put(tw);
                                killed++;
                        }
                } else {
                        if (!adv) {
                                adv = 1;
                                twdr->twcal_jiffie = j;
                                twdr->twcal_hand = slot;
                        }

                        if (!hlist_empty(&twdr->twcal_row[slot])) {
                                mod_timer(&twdr->twcal_timer, j);
                                goto out;
                        }
                }
                j += 1 << INET_TWDR_RECYCLE_TICK;
                slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
        }
        twdr->twcal_hand = -1;

out:
        if ((twdr->tw_count -= killed) == 0)
                del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

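/* Kill every timewait socket of @family whose network namespace is going
 * away (its netns refcount has dropped to zero).  Walks the whole established
 * hash under RCU, taking a reference before descheduling each candidate, and
 * restarts the chain walk whenever it leaves the RCU section or hits an
 * unexpected nulls marker.
 */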
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
                     struct inet_timewait_death_row *twdr, int family)
{
        struct inet_timewait_sock *tw;
        struct sock *sk;
        struct hlist_nulls_node *node;
        unsigned int slot;

        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
                rcu_read_lock();
restart:
                sk_nulls_for_each_rcu(sk, node, &head->chain) {
                        if (sk->sk_state != TCP_TIME_WAIT)
                                continue;
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                            atomic_read(&twsk_net(tw)->count))
                                continue;

                        if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
                                continue;

                        if (unlikely((tw->tw_family != family) ||
                                     atomic_read(&twsk_net(tw)->count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }

                        rcu_read_unlock();
                        local_bh_disable();
                        inet_twsk_deschedule(tw, twdr);
                        local_bh_enable();
                        inet_twsk_put(tw);
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
                 * not the expected one, we must restart the lookup.
                 * We probably met an item that was moved to another chain.
                 */
                if (get_nulls_value(node) != slot)
                        goto restart;
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);