{
struct packet_type *ptype;
+#ifdef CONFIG_NET_CLS_ACT
+ if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
+ net_timestamp(skb);
+#else
net_timestamp(skb);
+#endif
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
{
if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
}
}
EXPORT_SYMBOL(netif_device_detach);
{
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
__netdev_watchdog_up(dev);
}
}
{
u32 hash;
- if (skb_rx_queue_recorded(skb)) {
- hash = skb_get_rx_queue(skb);
- } else if (skb->sk && skb->sk->sk_hash) {
+ if (skb_rx_queue_recorded(skb))
+ return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+
+ if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
- } else
+ else
hash = skb->protocol;
hash = jhash_1word(hash, skb_tx_hashrnd);
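For reference, the hunk above leaves skb_tx_hash() looking roughly like the sketch below. Only the early return and the hash selection are taken from this hunk; the tail that scales the 32-bit jhash value down to a queue index is not shown in the diff and is reconstructed here as an assumption.

/* Sketch of the resulting skb_tx_hash(); the final scaling line is
 * assumed from the surrounding (unchanged) code, not part of this hunk. */
u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	/* A recorded rx queue is reused directly, folded into the tx range. */
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	/* Map the hash onto [0, real_num_tx_queues) (assumed tail). */
	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}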
struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
int err = -ENOENT;
- if (NAPI_GRO_CB(skb)->count == 1)
+ if (NAPI_GRO_CB(skb)->count == 1) {
+ skb_shinfo(skb)->gso_size = 0;
goto out;
+ }
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
}
out:
- skb_shinfo(skb)->gso_size = 0;
return netif_receive_skb(skb);
}
unsigned int offset = skb_gro_offset(skb);
hlen += offset;
- if (hlen <= skb_headlen(skb))
- return skb->data + offset;
-
- if (unlikely(!skb_shinfo(skb)->nr_frags ||
- skb_shinfo(skb)->frags[0].size <=
- hlen - skb_headlen(skb) ||
+ if (unlikely(skb_headlen(skb) ||
+ skb_shinfo(skb)->frags[0].size < hlen ||
PageHighMem(skb_shinfo(skb)->frags[0].page)))
return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
return page_address(skb_shinfo(skb)->frags[0].page) +
- skb_shinfo(skb)->frags[0].page_offset +
- offset - skb_headlen(skb);
+ skb_shinfo(skb)->frags[0].page_offset + offset;
}
EXPORT_SYMBOL(skb_gro_header);
return GRO_NORMAL;
for (p = napi->gro_list; p; p = p->next) {
- NAPI_GRO_CB(p)->same_flow = !compare_ether_header(
- skb_mac_header(p), skb_gro_mac_header(skb));
+ NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
+ && !compare_ether_header(skb_mac_header(p),
+ skb_gro_mac_header(skb));
NAPI_GRO_CB(p)->flush = 0;
}
}
EXPORT_SYMBOL(napi_reuse_skb);
-struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
- struct napi_gro_fraginfo *info)
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
struct sk_buff *skb = napi->skb;
- struct ethhdr *eth;
- skb_frag_t *frag;
- int i;
-
- napi->skb = NULL;
if (!skb) {
skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
if (!skb)
goto out;

skb_reserve(skb, NET_IP_ALIGN);
- }
-
- BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
- frag = &info->frags[info->nr_frags - 1];
-
- for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
- skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
- frag->size);
- frag++;
- }
- skb_shinfo(skb)->nr_frags = info->nr_frags;
-
- skb->data_len = info->len;
- skb->len += info->len;
- skb->truesize += info->len;
-
- skb_reset_mac_header(skb);
- skb_gro_reset_offset(skb);
- eth = skb_gro_header(skb, sizeof(*eth));
- if (!eth) {
- napi_reuse_skb(napi, skb);
- skb = NULL;
- goto out;
+ napi->skb = skb;
}
- skb_gro_pull(skb, sizeof(*eth));
-
- /*
- * This works because the only protocols we care about don't require
- * special handling. We'll fix it up properly at the end.
- */
- skb->protocol = eth->h_proto;
-
- skb->ip_summed = info->ip_summed;
- skb->csum = info->csum;
-
out:
return skb;
}
-EXPORT_SYMBOL(napi_fraginfo_skb);
+EXPORT_SYMBOL(napi_get_frags);
int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
{
}
EXPORT_SYMBOL(napi_frags_finish);
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+{
+ struct sk_buff *skb = napi->skb;
+ struct ethhdr *eth;
+
+ napi->skb = NULL;
+
+ skb_reset_mac_header(skb);
+ skb_gro_reset_offset(skb);
+
+ eth = skb_gro_header(skb, sizeof(*eth));
+ if (!eth) {
+ napi_reuse_skb(napi, skb);
+ skb = NULL;
+ goto out;
+ }
+
+ skb_gro_pull(skb, sizeof(*eth));
+
+ /*
+ * This works because the only protocols we care about don't require
+ * special handling. We'll fix it up properly at the end.
+ */
+ skb->protocol = eth->h_proto;
+
+out:
+ return skb;
+}
+EXPORT_SYMBOL(napi_frags_skb);
+
+int napi_gro_frags(struct napi_struct *napi)
{
- struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+ struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
return NET_RX_DROP;
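A minimal driver-side sketch of the new interface that replaces napi_fraginfo_skb(): the driver asks the stack for an skb with napi_get_frags(), attaches its receive page(s), and hands the skb back via napi_gro_frags(). The function name example_rx_frag and the rx_page/rx_off/rx_len parameters are placeholders, not part of this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical poll-path fragment showing the new GRO frags API.
 * rx_page/rx_off/rx_len stand in for whatever the driver's RX ring
 * provides; error handling is kept to the bare minimum. */
static void example_rx_frag(struct napi_struct *napi,
			    struct page *rx_page, int rx_off, int rx_len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(rx_page);	/* allocation failed, drop the frame */
		return;
	}

	/* Attach the received page as the first fragment and account for it. */
	skb_fill_page_desc(skb, 0, rx_page, rx_off, rx_len);
	skb->len += rx_len;
	skb->data_len += rx_len;
	skb->truesize += rx_len;

	/* Checksum state is now set on the skb itself rather than passed
	 * through a napi_gro_fraginfo structure. */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* napi_frags_skb() pulls the Ethernet header and GRO takes over. */
	napi_gro_frags(napi);
}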
struct sk_buff *skb, *next;
list_del_init(&napi->dev_list);
- kfree_skb(napi->skb);
+ napi_free_frags(napi);
for (skb = napi->gro_list; skb; skb = next) {
next = skb->next;
dev->iflink = -1;
#ifdef CONFIG_COMPAT_NET_DEV_OPS
- /* Netdevice_ops API compatiability support.
+ /* Netdevice_ops API compatibility support.
* This is temporary until all network devices are converted.
*/
if (dev->netdev_ops) {
dev->name, netdev_drivername(dev, drivername, 64));
/* This works only because net_device_ops and the
- compatiablity structure are the same. */
+ compatibility structure are the same. */
dev->netdev_ops = (void *) &(dev->init);
}
#endif