net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE					\
	(sizeof(struct ethhdr) +			\
	 sizeof(struct iphdr) +				\
	 sizeof(struct udphdr) +			\
	 MAX_UDP_CHUNK)
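/*
 * With the usual header sizes (14-byte ethhdr, 20-byte iphdr, 8-byte
 * udphdr), this works out to 14 + 20 + 8 + 1460 = 1502 bytes per pooled
 * skb: one full 1500-byte-MTU IP packet plus the Ethernet header.
 */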
static void zap_completion_queue(void);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
			      struct netdev_queue *txq)
{
	int status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		txq = skb_get_tx_queue(dev, skb);

		local_irq_save(flags);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
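/*
 * Note: on a failed transmit above, the skb is requeued at the head of
 * txq so that message ordering is preserved, and the whole queue is
 * retried HZ/10 jiffies (roughly 100ms) later.
 */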
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 */
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}

static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	int budget = 0;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	up(&ni->dev_lock);

	zap_completion_queue();
}
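/*
 * Note that poll_napi() is called with a budget of 0: NAPI handlers
 * invoked via netpoll are expected to reap TX completions only, and
 * poll_one_napi() will WARN if a ->poll() handler reports more work
 * than that.
 */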
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
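/*
 * Sketch of the intended pairing (the callers live elsewhere in the
 * core, e.g. the device open/close paths; this fragment is purely
 * illustrative):
 *
 *	netpoll_poll_disable(dev);	// waits out in-flight pollers
 *	... change device state ...
 *	netpoll_poll_enable(dev);	// netpoll may poll/transmit again
 */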
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:
	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
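/*
 * find_skb() therefore degrades gracefully under memory pressure: try a
 * fresh atomic allocation, fall back to the pre-filled pool, and as a
 * last resort poll the device up to ten times to reap completed TX skbs
 * before giving up.
 */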
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device; maybe there is some cleanup to reap */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
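/*
 * Callers normally reach this via the netpoll_send_skb() wrapper (at the
 * time of writing, a static inline in include/linux/netpoll.h), which
 * supplies the "IRQs disabled" contract noted above, roughly:
 *
 *	local_irq_save(flags);
 *	netpoll_send_skb_on_dev(np, skb, np->dev);
 *	local_irq_restore(flags);
 */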
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
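/*
 * The frame above is built back to front: the payload is copied first,
 * then the UDP, IP (or IPv6), and Ethernet headers are each prepended
 * with skb_push(), yielding (IPv4 case):
 *
 *	| ethhdr | iphdr | udphdr | msg (len bytes) |
 *
 * find_skb() reserved (total_len - len) bytes of headroom up front, so
 * every skb_push() here is guaranteed to fit.
 */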
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
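/*
 * The option string uses the familiar netconsole syntax:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-mac]
 *
 * for example (illustrative values only):
 *
 *	4444@10.0.0.1/eth1,9353@10.0.0.2/12:34:56:78:9a:bc
 */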
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);
		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
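/*
 * A minimal end-to-end usage sketch (hypothetical caller; the option
 * string and message are illustrative only). Note the option buffer
 * must be writable, since netpoll_parse_options() tokenizes in place:
 *
 *	static struct netpoll np = { .name = "example" };
 *	char opt[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
 *
 *	if (netpoll_parse_options(&np, opt))
 *		return;		// bad syntax
 *	if (netpoll_setup(&np))
 *		return;		// no such device, or no usable address
 *	netpoll_send_udp(&np, "hello\n", 6);
 *	...
 *	netpoll_cleanup(&np);
 */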
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
		container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
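/*
 * __netpoll_free_async() is for callers that cannot tear down
 * synchronously (e.g. from contexts where taking rtnl or freeing np
 * directly would be unsafe); the real work, including kfree(np),
 * happens later in process context in netpoll_async_cleanup() above,
 * which takes rtnl itself.
 */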
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);