/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) To keep the spinlock held for only a short time,
 *    we use a second list to store long-lived entries, which are
 *    handled by the garbage-collection task fired by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long-lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;

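/*
 * Walk the busy list and destroy every entry whose refcount has dropped
 * to zero; entries still referenced are kept on the list for the next
 * pass.  Once the busy list is drained, any entries queued on
 * dst_garbage.list in the meantime are pulled over and processed too.
 * If entries remain, the work re-arms itself with a delay that grows
 * (up to DST_GC_MAX) when fewer than a tenth of the delayed entries
 * were freed, and shrinks back to DST_GC_MIN otherwise.
 */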
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on the gc list, invalidate it and add it to the
			 * gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when their parent is obsoleted.
			 * But we do not have a state "obsoleted, but
			 * referenced by parent", so this is correct for now.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed / 10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4 * HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on this
	 * variable, and to catch any writes to it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

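/**
 * dst_init - initialise an already allocated dst_entry
 * @dst: entry to initialise
 * @ops: protocol-specific dst operations
 * @dev: device the entry is bound to (a reference is taken if non-NULL)
 * @initial_ref: initial reference count
 * @initial_obsolete: initial value for dst->obsolete
 * @flags: DST_* flags; unless DST_NOCOUNT is set, the entry is counted
 *	in @ops' entry accounting
 */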
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

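/**
 * dst_alloc - allocate and initialise a dst_entry from @ops' kmem cache
 * @ops: protocol-specific dst operations
 * @dev: device the entry will be bound to
 * @initial_ref: initial reference count
 * @initial_obsolete: initial value for dst->obsolete
 * @flags: DST_* flags
 *
 * If @ops provides a gc() hook and the number of cached entries exceeds
 * ops->gc_thresh, garbage collection is attempted first; the allocation
 * fails if gc() cannot make room.  Returns the new entry or NULL.
 */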
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is needed when a protocol
	 * module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

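/*
 * Mark @dst dead and queue it on the garbage list for the gc task.
 * If the gc timer is currently backed off, bring it forward so the
 * new entry is reaped within DST_GC_MIN.
 */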
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

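/*
 * Tear down a dst whose last reference is gone: drop the entry
 * accounting, call the protocol destructor, release the device and
 * lwtunnel state, then free the memory.  If the entry has a NOHASH
 * child, our reference to it is dropped: an unreferenced NOHASH child
 * is destroyed in turn, while one still referenced elsewhere is
 * returned to the caller for deferred freeing.  Returns NULL otherwise.
 */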
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

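/*
 * Drop a reference to @dst.  A negative refcount indicates a
 * refcounting bug and is reported (ratelimited).  When the last
 * reference to a DST_NOCACHE entry is dropped, the actual destruction
 * is deferred to an RCU callback, since such entries may still be
 * dereferenced by RCU readers.
 */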
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short nocache = dst->flags & DST_NOCACHE;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (unlikely(newrefcnt < 0))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt && unlikely(nocache))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

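/*
 * Copy-on-write helper for dst metrics: replace the read-only metrics
 * at @old with a private, writable copy, installed with cmpxchg().  If
 * another CPU wins the race, our copy is freed and the winner's
 * metrics are returned instead, or NULL if the winner's metrics are
 * themselves read-only.
 */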
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

static struct dst_ops md_dst_ops = {
	.family =		AF_UNSPEC,
};

static int dst_md_discard_out(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

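/*
 * Initialise the embedded dst_entry of a metadata dst and zero
 * everything that follows it (the tunnel info plus @optslen bytes of
 * options).  Metadata dsts are uncached, uncounted and never bound to
 * a device; their input/output handlers only warn and drop.
 */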
static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);

	dst->input = dst_md_discard;
	dst->output = dst_md_discard_out;

	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
}

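/**
 * metadata_dst_alloc - allocate and initialise a metadata dst
 * @optslen: number of bytes of tunnel option data to reserve
 * @flags: allocation flags (gfp_t)
 *
 * Returns a metadata dst with room for @optslen bytes of options,
 * or NULL on allocation failure.  Free with metadata_dst_free().
 */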
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}

struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
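/*
 * Detach @dst from @dev after the protocol's ifdown hook has run.
 * On NETDEV_DOWN (@unregister == 0) the entry is merely blackholed;
 * on final unregister its device reference is transferred to the
 * namespace's loopback device so that @dev can actually go away.
 */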
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

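/*
 * Netdevice notifier: on NETDEV_DOWN and NETDEV_UNREGISTER_FINAL, walk
 * both the busy list and the pending garbage list under dst_gc_mutex
 * and detach every dst from the affected device.  The garbage list is
 * spliced onto the tail of the busy list so each entry is visited once.
 */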
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_subsys_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}