/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
static void zap_completion_queue(void);
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}
static void service_neigh_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->neigh_tx)))
			netpoll_neigh_reply(skb, npi);
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni;

			bond_dev = netdev_master_upper_dev_get_rcu(dev);
			bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->neigh_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->neigh_tx, skb);
			}
		}
	}

	service_neigh_queue(ni);

	zap_completion_queue();
}
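
/*
 * For context, a minimal sketch (illustrative only, not part of this
 * file; all "example_*" names are hypothetical): a driver's
 * ndo_poll_controller typically just disables the device IRQ and re-runs
 * its interrupt handler, so pending work is processed with interrupts
 * off, e.g.:
 *
 *	static void example_netpoll(struct net_device *dev)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		disable_irq(priv->irq);
 *		example_interrupt(priv->irq, dev);
 *		enable_irq(priv->irq);
 *	}
 */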
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;

	udp_len = len + sizeof(*udph);

	ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip.ip,
					np->remote_ip.ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = htons(atomic_inc_return(&ip_ident));
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip.ip, &(iph->saddr));
	put_unaligned(np->remote_ip.ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);

	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_IP) {
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ptype,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same address,
			   we're fine by sending a single reply */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}
}
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now we trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		ulen = ntohs(uh->len);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;

		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip.ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip.ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip.ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
{
	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

	__netpoll_cleanup(np);
	kfree(np);
}

void __netpoll_free_rcu(struct netpoll *np)
{
	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	__netpoll_cleanup(np);

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);