ab9aaaff8d04936f237e5dceee7740e81d027e0d
[deliverable/linux.git] / include / linux / netpoll.h
1 /*
2 * Common code for low-level network console, dump, and debugger code
3 *
4 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
5 */
6
7 #ifndef _LINUX_NETPOLL_H
8 #define _LINUX_NETPOLL_H
9
10 #include <linux/netdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/rcupdate.h>
13 #include <linux/list.h>
14
/*
 * Overlay of IPv4/IPv6 address representations so netpoll can store
 * either family in one field.  all[] provides raw 32-bit word access
 * to the full (IPv6-sized) storage; ip/ip6 are the big-endian wire
 * forms; in/in6 are the standard socket-API structs.
 */
union inet_addr {
	__u32		all[4];
	__be32		ip;
	__be32		ip6[4];
	struct in_addr	in;
	struct in6_addr	in6;
};
22
/*
 * One netpoll client instance (e.g. a netconsole target): the bound
 * device, the UDP endpoint parameters, and an optional receive hook.
 */
struct netpoll {
	struct net_device *dev;		/* device this instance is attached to */
	char dev_name[IFNAMSIZ];	/* name used to look up @dev at setup */
	const char *name;		/* client name, e.g. "netconsole" */
	/* Optional callback invoked for matching received UDP payloads;
	 * instances with a hook are linked on netpoll_info::rx_np. */
	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
			    int offset, int len);

	union inet_addr local_ip, remote_ip;	/* endpoint addresses (v4 or v6) */
	bool ipv6;				/* true: use ip6[] members above */
	u16 local_port, remote_port;		/* UDP ports, host byte order */
	u8 remote_mac[ETH_ALEN];		/* destination MAC for tx frames */

	struct list_head rx; /* rx_np list element */
	struct work_struct cleanup_work;	/* deferred teardown context */
};
38
/*
 * Per-net_device netpoll state, shared by all netpoll instances bound
 * to the same device and freed via RCU when the last reference drops.
 */
struct netpoll_info {
	atomic_t refcnt;		/* number of netpolls using this device */

	spinlock_t rx_lock;		/* protects rx_np list and rx dispatch */
	struct semaphore dev_lock;	/* serializes device-level enable/disable */
	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */

	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
	struct sk_buff_head txq;	/* packets queued for deferred transmit */

	struct delayed_work tx_work;	/* drains txq when direct tx is busy */

	struct netpoll *netpoll;	/* primary instance for this device */
	struct rcu_head rcu;		/* deferred free of this structure */
};
54
#ifdef CONFIG_NETPOLL
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
/* No-op stubs so callers need not be conditional on CONFIG_NETPOLL. */
static inline void netpoll_rx_disable(struct net_device *dev) {}
static inline void netpoll_rx_enable(struct net_device *dev) {}
#endif
62
/* Transmit a UDP packet carrying @msg via @np's configured endpoint. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the parsed configuration (addresses, ports, MAC) of @np. */
void netpoll_print_options(struct netpoll *np);
/* Parse "local_port@local_ip/dev,remote..."-style option string into @np;
 * returns 0 on success, negative errno on bad input. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Attach @np to an already-resolved @ndev; @gfp controls allocations. */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
/* Resolve np->dev_name and fully set up the instance. */
int netpoll_setup(struct netpoll *np);
/* Teardown counterparts; the __ variants assume the caller holds RTNL. */
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* Core rx dispatch; returns nonzero if @skb was consumed by netpoll. */
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
/* Low-level transmit of @skb on an explicit @dev. */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
74 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
75 {
76 unsigned long flags;
77 local_irq_save(flags);
78 netpoll_send_skb_on_dev(np, skb, np->dev);
79 local_irq_restore(flags);
80 }
81
#ifdef CONFIG_NETPOLL_TRAP
/* Nonzero while netpoll is trapping (diverting) normal rx processing. */
int netpoll_trap(void);
/* Enable (@trap != 0) or disable packet trapping. */
void netpoll_set_trap(int trap);
/* True if any netpoll instance on this device registered an rx hook,
 * i.e. received packets may need netpoll dispatch. */
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return !list_empty(&npinfo->rx_np);
}
#else
/* Stubs: without CONFIG_NETPOLL_TRAP there is never rx trapping. */
static inline int netpoll_trap(void)
{
	return 0;
}
static inline void netpoll_set_trap(int trap)
{
}
static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
{
	return false;
}
#endif
102
103 #ifdef CONFIG_NETPOLL
/*
 * True if @skb arrived on a device that has netpoll attached and at
 * least one instance wants rx processing.  Uses the BH flavor of RCU
 * dereference, so the caller is expected to be in an RCU-bh read-side
 * section (softirq rx path).
 */
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	return npinfo && netpoll_rx_processing(npinfo);
}
110
/*
 * Give netpoll a chance to consume @skb on the rx path.
 * Returns true if netpoll took the packet (caller must not process it
 * further), false to let normal stack processing continue.
 *
 * Interrupts are disabled across the whole check so the unlocked
 * netpoll_rx_on() peek, the rx_lock acquisition, and __netpoll_rx()
 * run without rescheduling on this CPU.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	/* Cheap unlocked check first; most devices have no netpoll. */
	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_processing again with the lock held */
	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
133
134 static inline int netpoll_receive_skb(struct sk_buff *skb)
135 {
136 if (!list_empty(&skb->dev->napi_list))
137 return netpoll_rx(skb);
138 return 0;
139 }
140
141 static inline void *netpoll_poll_lock(struct napi_struct *napi)
142 {
143 struct net_device *dev = napi->dev;
144
145 if (dev && dev->npinfo) {
146 spin_lock(&napi->poll_lock);
147 napi->poll_owner = smp_processor_id();
148 return napi;
149 }
150 return NULL;
151 }
152
153 static inline void netpoll_poll_unlock(void *have)
154 {
155 struct napi_struct *napi = have;
156
157 if (napi) {
158 napi->poll_owner = -1;
159 spin_unlock(&napi->poll_lock);
160 }
161 }
162
/*
 * Heuristic for "are we inside a netpoll transmit?": the netpoll tx
 * path always runs with hard interrupts off (see netpoll_send_skb),
 * so irqs_disabled() is used as the indicator.  @dev is unused here.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
167
#else
/* !CONFIG_NETPOLL: stubs so callers compile away to nothing. */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
/* NOTE(review): only defined in the stub branch — the CONFIG_NETPOLL
 * side has no netpoll_netdev_init(); presumably harmless since callers
 * exist only when netpoll is disabled, but worth confirming. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif
196
197 #endif
This page took 0.048091 seconds and 4 git commands to generate.