net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE				\
	(sizeof(struct ethhdr) +		\
	 sizeof(struct iphdr) +			\
	 sizeof(struct udphdr) +		\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			netpoll_arp_reply(skb, npi);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->arp_tx, skb);
			}
		}
	}

	service_arp_queue(ni);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

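/*
 * Usage sketch (illustrative only, not taken from this file): a netpoll
 * client such as netconsole typically fills in a struct netpoll, binds it
 * to a device once with netpoll_setup(), and then emits messages through
 * netpoll_send_udp().  The field names below come from struct netpoll as
 * used throughout this file; the device name, ports and addresses are
 * made-up example values.
 *
 *	static struct netpoll np = {
 *		.name        = "example",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	np.remote_ip = in_aton("192.168.0.1");
 *	if (netpoll_setup(&np) == 0)
 *		netpoll_send_udp(&np, buf, len);
 */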
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = udp_len + sizeof(*iph);
	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id = 0;
	iph->frag_off = 0;
	iph->ttl = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

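/*
 * netpoll_parse_options() below accepts the usual netpoll/netconsole style
 * configuration string.  Judging from the parsing code, the expected layout
 * is (example values only, every field before a delimiter may be omitted to
 * keep its default):
 *
 *	[local_port]@[local_ip]/[dev_name],[remote_port]@<remote_ip>/[remote_mac]
 *
 * e.g.:
 *
 *	6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55
 */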
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur=opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
		container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->arp_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}

void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);