struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
- struct net_device *dev;
+ struct net_device *dev, *ret = NULL;
- rtnl_lock();
- dev = __dev_getfirstbyhwtype(net, type);
- if (dev)
- dev_hold(dev);
- rtnl_unlock();
- return dev;
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev)
+ if (dev->type == type) {
+ dev_hold(dev);
+ ret = dev;
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
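The RCU conversion keeps the old reference-counting contract: the device is
returned with a reference taken via dev_hold() inside the read-side section,
so callers still release it with dev_put(). A minimal caller sketch (the
ARPHRD_IEEE80211 lookup is illustrative only, not from this patch):

	struct net_device *dev;

	dev = dev_getfirstbyhwtype(&init_net, ARPHRD_IEEE80211);
	if (dev) {
		/* ... use dev; pinned by the dev_hold() in the lookup ... */
		dev_put(dev);
	}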
}
EXPORT_SYMBOL(netdev_state_change);
-void netdev_bonding_change(struct net_device *dev, unsigned long event)
+int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
- call_netdevice_notifiers(event, dev);
+ return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
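Because the function now returns whatever call_netdevice_notifiers() returned,
a caller can turn a notifier veto into an errno with the standard
notifier_to_errno() helper. A hedged caller sketch (bond_dev and the
surrounding error handling are assumed, not part of this patch):

	int err;

	err = netdev_bonding_change(bond_dev, NETDEV_BONDING_FAILOVER);
	err = notifier_to_errno(err);
	if (err)
		return err;	/* a notifier on the chain vetoed the event */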
DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
+#ifdef CONFIG_SMP
/*
* get_rps_cpu is called from netif_rx and netif_receive_skb and returns
* the target CPU from the RPS map of the receiving queue for a given skb.
__napi_schedule(&queue->backlog);
__get_cpu_var(netdev_rx_stat).received_rps++;
}
+#endif /* CONFIG_SMP */
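The body of get_rps_cpu() is elided here; its core is mapping a 32-bit flow
hash onto the receive queue's CPU map. A simplified reconstruction, assuming
an rps_map-style array of CPU ids (the helper name rps_map_cpu is
hypothetical, and the real function also computes the hash from the packet
headers):

	/* Scale a 32-bit hash uniformly into [0, len) and pick that CPU. */
	static int rps_map_cpu(const u16 *cpus, unsigned int len, u32 hash)
	{
		if (!len)
			return -1;	/* no map: caller uses the local CPU */
		return cpus[((u64) hash * len) >> 32];
	}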
/*
* enqueue_to_backlog is called to queue an skb to a per CPU backlog
/* Schedule NAPI for backlog device */
if (napi_schedule_prep(&queue->backlog)) {
+#ifdef CONFIG_SMP
if (cpu != smp_processor_id()) {
struct rps_remote_softirq_cpus *rcpus =
&__get_cpu_var(rps_remote_softirq_cpus);
cpu_set(cpu, rcpus->mask[rcpus->select]);
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
} else
__napi_schedule(&queue->backlog);
+#else
+ __napi_schedule(&queue->backlog);
+#endif
}
goto enqueue;
}
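Rather than raising a cross-CPU IPI per packet, the remote case records the
target CPU in a per-CPU, double-buffered mask; net_rx_action later flips the
select index and sends all pending IPIs in one batch. The bookkeeping
structure is presumably along these lines (a sketch consistent with the
mask[select] usage here and in net_rx_action below):

	struct rps_remote_softirq_cpus {
		cpumask_t mask[2];	/* one mask filling, one draining */
		int select;		/* which mask is currently filling */
	};
	static DEFINE_PER_CPU(struct rps_remote_softirq_cpus,
			      rps_remote_softirq_cpus);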
if (!skb->tstamp.tv64)
net_timestamp(skb);
+#ifdef CONFIG_SMP
cpu = get_rps_cpu(skb->dev, skb);
if (cpu < 0)
cpu = smp_processor_id();
+#else
+ cpu = smp_processor_id();
+#endif
return enqueue_to_backlog(skb, cpu);
}
{
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
+ struct net_device *master;
struct net_device *null_or_orig;
struct net_device *null_or_bond;
int ret = NET_RX_DROP;
null_or_orig = NULL;
orig_dev = skb->dev;
- if (orig_dev->master) {
- if (skb_bond_should_drop(skb))
+ master = ACCESS_ONCE(orig_dev->master);
+ if (master) {
+ if (skb_bond_should_drop(skb, master))
null_or_orig = orig_dev; /* deliver only exact match */
else
- skb->dev = orig_dev->master;
+ skb->dev = master;
}
__get_cpu_var(netdev_rx_stat).total++;
*/
int netif_receive_skb(struct sk_buff *skb)
{
+#ifdef CONFIG_SMP
int cpu;
cpu = get_rps_cpu(skb->dev, skb);
if (cpu < 0)
return __netif_receive_skb(skb);
else
return enqueue_to_backlog(skb, cpu);
+#else
+ return __netif_receive_skb(skb);
+#endif
}
EXPORT_SYMBOL(netif_receive_skb);
/* Network device is going away, flush any packets still pending */
-static void flush_backlog(void *arg)
+static void flush_backlog(struct net_device *dev, int cpu)
{
- struct net_device *dev = arg;
- struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ struct softnet_data *queue = &per_cpu(softnet_data, cpu);
struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
if (skb->dev == dev) {
__skb_unlink(skb, &queue->input_pkt_queue);
kfree_skb(skb);
}
+ spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
}
static int napi_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_napi_del);
+#ifdef CONFIG_SMP
/*
* net_rps_action sends any pending IPIs for RPS. This is only called from
* softirq context, and interrupts must be enabled.
}
cpus_clear(*mask);
}
+#endif
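The loop that actually sends the IPIs is elided above. Given the per-CPU csd
setup at the end of this patch, it presumably walks the pending mask and kicks
each remote backlog through its call_single_data, roughly:

	/* Reconstructed sketch, not verbatim from this patch. */
	for_each_cpu_mask_nr(cpu, *mask) {
		struct softnet_data *queue = &per_cpu(softnet_data, cpu);
		if (cpu_online(cpu))
			__smp_call_function_single(cpu, &queue->csd, 0);
	}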
static void net_rx_action(struct softirq_action *h)
{
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
+#ifdef CONFIG_SMP
int select;
struct rps_remote_softirq_cpus *rcpus;
+#endif
local_irq_disable();
netpoll_poll_unlock(have);
}
out:
+#ifdef CONFIG_SMP
rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
select = rcpus->select;
rcpus->select ^= 1;
local_irq_enable();
net_rps_action(&rcpus->mask[select]);
+#else
+ local_irq_enable();
+#endif
#ifdef CONFIG_NET_DMA
/*
slave->master = master;
- synchronize_net();
-
- if (old)
+ if (old) {
+ synchronize_net();
dev_put(old);
-
+ }
if (master)
slave->flags |= IFF_SLAVE;
else
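Moving synchronize_net() under the if (old) branch pairs with the lockless
reader earlier in this patch: __netif_receive_skb() snapshots the pointer with
ACCESS_ONCE(orig_dev->master), so the writer only needs a grace period before
dropping the reference such a reader may still hold; with no old master there
is nothing to free and the expensive synchronization is skipped:

	/* Writer (above)             Reader (receive path, earlier hunk)
	 *   slave->master = master;    master = ACCESS_ONCE(orig_dev->master);
	 *   if (old) {
	 *       synchronize_net();   <- no reader can still see 'old'
	 *       dev_put(old);
	 *   }
	 */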
}
EXPORT_SYMBOL(dev_unicast_unsync);
-static void dev_unicast_flush(struct net_device *dev)
+void dev_unicast_flush(struct net_device *dev)
{
netif_addr_lock_bh(dev);
__hw_addr_flush(&dev->uc);
netif_addr_unlock_bh(dev);
}
+EXPORT_SYMBOL(dev_unicast_flush);
static void dev_unicast_init(struct net_device *dev)
{
}
}
-static void dev_addr_discard(struct net_device *dev)
+void dev_addr_discard(struct net_device *dev)
{
netif_addr_lock_bh(dev);
netif_addr_unlock_bh(dev);
}
+EXPORT_SYMBOL(dev_addr_discard);
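With both helpers exported, a driver can drop a device's address filter state
directly during teardown. A hypothetical module-side sketch (slave_dev and the
teardown path are assumed, not shown in this patch):

	/* Hypothetical teardown: flush secondary unicast addresses, then
	 * discard the remaining address filter state. */
	dev_unicast_flush(slave_dev);
	dev_addr_discard(slave_dev);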
/**
* dev_get_flags - get flags reported to userspace
while (!list_empty(&list)) {
struct net_device *dev
= list_first_entry(&list, struct net_device, todo_list);
+ int i;
list_del(&dev->todo_list);
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
dev->reg_state = NETREG_UNREGISTERED;
- on_each_cpu(flush_backlog, dev, 1);
+ for_each_online_cpu(i)
+ flush_backlog(dev, i);
netdev_wait_allrefs(dev);
queue->completion_queue = NULL;
INIT_LIST_HEAD(&queue->poll_list);
+#ifdef CONFIG_SMP
queue->csd.func = trigger_softirq;
queue->csd.info = queue;
queue->csd.flags = 0;
+#endif
queue->backlog.poll = process_backlog;
queue->backlog.weight = weight_p;
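queue->csd.func points at trigger_softirq(), whose tail appears in the first
CONFIG_SMP hunk of this patch. A reconstruction consistent with those
fragments (hedged, not verbatim):

	/* Runs on the remote CPU via __smp_call_function_single(): schedule
	 * that CPU's backlog NAPI and count the RPS kick. */
	static void trigger_softirq(void *data)
	{
		struct softnet_data *queue = data;

		__napi_schedule(&queue->backlog);
		__get_cpu_var(netdev_rx_stat).received_rps++;
	}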