net/core/link_watch.c
/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>


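/*
 * Flag bits in linkwatch_flags: LW_RUNNING is set while linkwatch_work
 * is scheduled or running, LW_SE_USED marks the statically allocated
 * singleevent below as in use.
 */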
enum lw_bits {
	LW_RUNNING = 0,
	LW_SE_USED
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(void *dummy);
static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

struct lw_event {
	struct list_head list;
	struct net_device *dev;
};

/* Avoid kmalloc() for most systems */
static struct lw_event singleevent;
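/* A single pending link change is the common case, so events are taken
 * from this static slot first; additional concurrent events fall back
 * to kmalloc(GFP_ATOMIC) in linkwatch_fire_event().
 */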

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	LIST_HEAD(head);
	struct list_head *n, *next;

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &head);
	spin_unlock_irq(&lweventlist_lock);
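	/* Events were spliced onto the private list above, so the walk
	 * below does not need to hold lweventlist_lock.
	 */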

	list_for_each_safe(n, next, &head) {
		struct lw_event *event = list_entry(n, struct lw_event, list);
		struct net_device *dev = event->dev;

		if (event == &singleevent) {
			clear_bit(LW_SE_USED, &linkwatch_flags);
		} else {
			kfree(event);
		}

		/* We are about to handle this device,
		 * so new events can be accepted
		 */
		clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

		if (dev->flags & IFF_UP) {
			if (netif_carrier_ok(dev)) {
				WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
				dev_activate(dev);
			} else
				dev_deactivate(dev);

			netdev_state_change(dev);
		}

		dev_put(dev);
	}
}


static void linkwatch_event(void *dummy)
{
	/* Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket
	 */
	linkwatch_nextevent = jiffies + HZ;
	clear_bit(LW_RUNNING, &linkwatch_flags);

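	/* LW_RUNNING was cleared above, so events that fire while the
	 * queue is drained below can schedule another, delayed, run.
	 */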
	rtnl_shlock();
	linkwatch_run_queue();
	rtnl_shunlock();
}


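/*
 * Queue a link state change event for @dev and kick the handler.  Safe
 * to call from atomic context: allocation is GFP_ATOMIC and the event
 * list is protected by an irqsave spinlock.
 */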
void linkwatch_fire_event(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		unsigned long flags;
		struct lw_event *event;

		if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) {
			event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC);

			if (unlikely(event == NULL)) {
				clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
				return;
			}
		} else {
			event = &singleevent;
		}

		dev_hold(dev);
		event->dev = dev;

		spin_lock_irqsave(&lweventlist_lock, flags);
		list_add_tail(&event->list, &lweventlist);
		spin_unlock_irqrestore(&lweventlist_lock, flags);

		if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
			unsigned long thisevent = jiffies;

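			/* Rate limit: run at once if at least a second has
			 * passed since the last run started, otherwise delay
			 * until the next allowed slot.
			 */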
			if (thisevent >= linkwatch_nextevent) {
				schedule_work(&linkwatch_work);
			} else {
				schedule_delayed_work(&linkwatch_work, linkwatch_nextevent - thisevent);
			}
		}
	}
}

EXPORT_SYMBOL(linkwatch_fire_event);
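
/*
 * Sketch of typical use (link_is_up() here is a hypothetical driver
 * helper): drivers normally do not call linkwatch_fire_event() directly
 * but report carrier changes via netif_carrier_on()/netif_carrier_off(),
 * which fire the event when the carrier state actually changes:
 *
 *	if (link_is_up(priv))
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */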