/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;
#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
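
/*
 * A rough worked example of the macro above (assuming the usual header
 * layouts: sizeof(struct ethhdr) == 14, sizeof(struct iphdr) == 20,
 * sizeof(struct udphdr) == 8): MAX_SKB_SIZE comes to
 * 14 + 20 + 8 + 1460 = 1502 bytes, i.e. the headers of one Ethernet
 * frame plus the largest UDP payload netpoll will ever emit.
 */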
static void zap_completion_queue(void);
static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
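
/*
 * Background for the checks above (RFC 768): the UDP checksum covers a
 * pseudo-header of source address, destination address, protocol and
 * UDP length, plus the UDP header and payload. csum_tcpudp_nofold()
 * computes the pseudo-header sum; when the device has already summed
 * the whole packet (CHECKSUM_COMPLETE), folding that sum together with
 * skb->csum must yield zero for a valid datagram. That is the fast
 * path taken before falling back to __skb_checksum_complete().
 */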
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communications, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
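
/*
 * A minimal sketch of the ->poll() contract relied on above, using a
 * hypothetical driver callback (my_napi_poll, my_hw_rx_pending and
 * my_hw_process_one_rx are illustrative names, not real kernel APIs):
 * ->poll() handles at most @budget packets and returns how many it
 * actually processed, so "budget - work" is the headroom left for the
 * next NAPI instance polled on this device.
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget && my_hw_rx_pending(napi))
 *			work += my_hw_process_one_rx(napi);
 *		if (work < budget)
 *			napi_complete(napi);	// re-enable device interrupts
 *		return work;
 *	}
 */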
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	WARN_ON_ONCE(!irqs_disabled());

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			rcu_read_lock_bh();
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			rcu_read_unlock_bh();
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}
static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			netpoll_arp_reply(skb, npi);
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->arp_tx, skb);
			}
		}
	}

	service_arp_queue(ni);

	zap_completion_queue();
}
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
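
/*
 * A worked example of the retry budget in the loop above, assuming
 * HZ=1000: jiffies_to_usecs(1) is 1000, so the sender makes
 * 1000 / USEC_PER_POLL = 20 attempts, spinning udelay(50) between
 * tries. In other words, it busy-waits for roughly one clock tick
 * before giving up and deferring the skb to the delayed-work TX queue.
 */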
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = udp_len + sizeof(*iph);
	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
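
/*
 * A minimal usage sketch (hypothetical caller, assuming a netpoll
 * structure that has already gone through netpoll_setup()): this is
 * essentially what a netconsole-style client does for each console
 * message, chunking it to the MAX_UDP_CHUNK payload limit.
 *
 *	static struct netpoll my_np;	// assumed configured elsewhere
 *
 *	static void my_console_write(const char *msg, unsigned int len)
 *	{
 *		while (len > 0) {
 *			int frag = min_t(unsigned int, len, MAX_UDP_CHUNK);
 *
 *			netpoll_send_udp(&my_np, msg, frag);
 *			msg += frag;
 *			len -= frag;
 *		}
 *	}
 */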
static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;
	struct arphdr *arp;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection of whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
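
/*
 * A worked example of the option string accepted above (the addresses
 * and interface name are made up for illustration):
 *
 *	6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55
 *
 * parses as: local UDP port 6665, local IP 192.168.0.2, interface
 * eth0, remote UDP port 6666, remote IP 192.168.0.1, and the remote
 * MAC address. Each field before the remote IP may be left empty to
 * keep its current value, e.g. "@/,@192.168.0.1/" sets only the
 * remote IP.
 */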
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
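
/*
 * A minimal end-to-end usage sketch (hypothetical module init; the
 * config values are made up for illustration): parse an option string
 * into a netpoll structure, then bind it to the device.
 *
 *	static struct netpoll my_np = {
 *		.name = "mynetpoll",
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		static char config[] =
 *			"6665@192.168.0.2/eth0,6666@192.168.0.1/00:11:22:33:44:55";
 *
 *		if (netpoll_parse_options(&my_np, config))
 *			return -EINVAL;
 *		return netpoll_setup(&my_np);
 *	}
 */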
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->arp_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}

void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);
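
/*
 * A short usage sketch (hypothetical caller in the style of a polling
 * debugger; the steps between the calls are illustrative): while the
 * trap count is non-zero, __netpoll_rx() consumes every packet it sees
 * instead of letting unmatched traffic continue up the stack.
 *
 *	netpoll_set_trap(1);	// take exclusive ownership of RX
 *	// ... poll the interface and service packets directly ...
 *	netpoll_set_trap(0);	// restore normal RX processing
 */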