Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorDavid S. Miller <davem@davemloft.net>
Thu, 17 May 2012 02:17:37 +0000 (22:17 -0400)
committerDavid S. Miller <davem@davemloft.net>
Thu, 17 May 2012 02:17:37 +0000 (22:17 -0400)
29 files changed:
1  2 
MAINTAINERS
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/usb/usbnet.c
drivers/net/wireless/rtlwifi/pci.c
drivers/ptp/ptp_pch.c
drivers/vhost/net.c
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set_ahash.h
net/8021q/vlan_dev.c
net/core/dev.c
net/core/pktgen.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netport.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/sctp/output.c

diff --combined MAINTAINERS
index 887c965c2711c5bb5caacf16bbbe42c843e1da33,b36270986501dd41fe33280388ea2f7e9c3c9720..490dd6e640aca4d29197eabe68406034071c0a1f
@@@ -1431,7 -1431,6 +1431,7 @@@ F:      include/linux/backlight.
  BATMAN ADVANCED
  M:    Marek Lindner <lindner_marek@yahoo.de>
  M:    Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
 +M:    Antonio Quartulli <ordex@autistici.org>
  L:    b.a.t.m.a.n@lists.open-mesh.org
  W:    http://www.open-mesh.org/
  S:    Maintained
@@@ -1969,10 -1968,9 +1969,9 @@@ S:     Maintaine
  F:    drivers/net/ethernet/ti/cpmac.c
  
  CPU FREQUENCY DRIVERS
- M:    Dave Jones <davej@redhat.com>
+ M:    Rafael J. Wysocki <rjw@sisk.pl>
  L:    cpufreq@vger.kernel.org
- W:    http://www.codemonkey.org.uk/projects/cpufreq/
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
+ L:    linux-pm@vger.kernel.org
  S:    Maintained
  F:    drivers/cpufreq/
  F:    include/linux/cpufreq.h
@@@ -3520,6 -3518,12 +3519,6 @@@ M:     Deepak Saxena <dsaxena@plexity.net
  S:    Maintained
  F:    drivers/char/hw_random/ixp4xx-rng.c
  
 -INTEL IXP2000 ETHERNET DRIVER
 -M:    Lennert Buytenhek <kernel@wantstofly.org>
 -L:    netdev@vger.kernel.org
 -S:    Maintained
 -F:    drivers/net/ethernet/xscale/ixp2000/
 -
  INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
  M:    Jeff Kirsher <jeffrey.t.kirsher@intel.com>
  M:    Jesse Brandeburg <jesse.brandeburg@intel.com>
@@@ -4032,6 -4036,7 +4031,7 @@@ F:      Documentation/scsi/53c700.tx
  F:    drivers/scsi/53c700*
  
  LED SUBSYSTEM
+ M:    Bryan Wu <bryan.wu@canonical.com>
  M:    Richard Purdie <rpurdie@rpsys.net>
  S:    Maintained
  F:    drivers/leds/
@@@ -5203,7 -5208,7 +5203,7 @@@ S:      Maintaine
  F:    include/linux/personality.h
  
  PHONET PROTOCOL
 -M:    Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 +M:    Remi Denis-Courmont <courmisch@gmail.com>
  S:    Supported
  F:    Documentation/networking/phonet.txt
  F:    include/linux/phonet.h
@@@ -6671,16 -6676,6 +6671,16 @@@ L:    alsa-devel@alsa-project.org (moderat
  S:    Maintained
  F:    sound/soc/codecs/twl4030*
  
 +TI WILINK WIRELESS DRIVERS
 +M:    Luciano Coelho <coelho@ti.com>
 +L:    linux-wireless@vger.kernel.org
 +W:    http://wireless.kernel.org/en/users/Drivers/wl12xx
 +W:    http://wireless.kernel.org/en/users/Drivers/wl1251
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
 +S:    Maintained
 +F:    drivers/net/wireless/ti/
 +F:    include/linux/wl12xx.h
 +
  TIPC NETWORK LAYER
  M:    Jon Maloy <jon.maloy@ericsson.com>
  M:    Allan Stephens <allan.stephens@windriver.com>
@@@ -7437,6 -7432,23 +7437,6 @@@ M:     Miloslav Trmac <mitr@volny.cz
  S:    Maintained
  F:    drivers/input/misc/wistron_btns.c
  
 -WL1251 WIRELESS DRIVER
 -M:    Luciano Coelho <coelho@ti.com>
 -L:    linux-wireless@vger.kernel.org
 -W:    http://wireless.kernel.org/en/users/Drivers/wl1251
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 -S:    Maintained
 -F:    drivers/net/wireless/wl1251/*
 -
 -WL1271 WIRELESS DRIVER
 -M:    Luciano Coelho <coelho@ti.com>
 -L:    linux-wireless@vger.kernel.org
 -W:    http://wireless.kernel.org/en/users/Drivers/wl12xx
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
 -S:    Maintained
 -F:    drivers/net/wireless/wl12xx/
 -F:    include/linux/wl12xx.h
 -
  WL3501 WIRELESS PCMCIA CARD DRIVER
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  L:    linux-wireless@vger.kernel.org
index b4f1b4ac92c6b066d5b64fbfdd30f8eee3c73821,2e1f8066f1a8146b0f89cfb4a9e08e1f6b9a54bd..0f59c1564e53263248a889a7b29f80a87847c488
@@@ -332,7 -332,7 +332,7 @@@ static void rlb_update_entry_from_arp(s
        if ((client_info->assigned) &&
            (client_info->ip_src == arp->ip_dst) &&
            (client_info->ip_dst == arp->ip_src) &&
 -          (compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) {
 +          (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
                /* update the clients MAC address */
                memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
                client_info->ntt = 1;
        _unlock_rx_hashtbl_bh(bond);
  }
  
- static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
+ static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave)
  {
        struct arp_pkt *arp;
  
        if (skb->protocol != cpu_to_be16(ETH_P_ARP))
-               return;
+               goto out;
  
        arp = (struct arp_pkt *) skb->data;
        if (!arp) {
                pr_debug("Packet has no ARP data\n");
-               return;
+               goto out;
        }
  
        if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
-               return;
+               goto out;
  
        if (skb->len < sizeof(struct arp_pkt)) {
                pr_debug("Packet is too small to be an ARP\n");
-               return;
+               goto out;
        }
  
        if (arp->op_code == htons(ARPOP_REPLY)) {
                rlb_update_entry_from_arp(bond, arp);
                pr_debug("Server received an ARP Reply from client\n");
        }
+ out:
+       return RX_HANDLER_ANOTHER;
  }
  
  /* Caller must hold bond lock for read */
@@@ -448,8 -450,8 +450,8 @@@ static void rlb_clear_slave(struct bond
  
                        if (assigned_slave) {
                                rx_hash_table[index].slave = assigned_slave;
 -                              if (compare_ether_addr_64bits(rx_hash_table[index].mac_dst,
 -                                                            mac_bcast)) {
 +                              if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst,
 +                                                           mac_bcast)) {
                                        bond_info->rx_hashtbl[index].ntt = 1;
                                        bond_info->rx_ntt = 1;
                                        /* A slave has been removed from the
@@@ -561,7 -563,7 +563,7 @@@ static void rlb_req_update_slave_client
                client_info = &(bond_info->rx_hashtbl[hash_index]);
  
                if ((client_info->slave == slave) &&
 -                  compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
 +                  !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
                        client_info->ntt = 1;
                        ntt = 1;
                }
@@@ -600,9 -602,9 +602,9 @@@ static void rlb_req_update_subnet_clien
                 * unicast mac address.
                 */
                if ((client_info->ip_src == src_ip) &&
 -                  compare_ether_addr_64bits(client_info->slave->dev->dev_addr,
 -                         bond->dev->dev_addr) &&
 -                  compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
 +                  !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
 +                                           bond->dev->dev_addr) &&
 +                  !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
                        client_info->ntt = 1;
                        bond_info->rx_ntt = 1;
                }
@@@ -629,7 -631,7 +631,7 @@@ static struct slave *rlb_choose_channel
                if ((client_info->ip_src == arp->ip_src) &&
                    (client_info->ip_dst == arp->ip_dst)) {
                        /* the entry is already assigned to this client */
 -                      if (compare_ether_addr_64bits(arp->mac_dst, mac_bcast)) {
 +                      if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
                                /* update mac address from arp */
                                memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
                        }
                memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
                client_info->slave = assigned_slave;
  
 -              if (compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
 +              if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
                        client_info->ntt = 1;
                        bond->alb_info.rx_ntt = 1;
                } else {
@@@ -1009,18 -1011,18 +1011,18 @@@ static void alb_change_hw_addr_on_detac
        int perm_curr_diff;
        int perm_bond_diff;
  
 -      perm_curr_diff = compare_ether_addr_64bits(slave->perm_hwaddr,
 -                                                 slave->dev->dev_addr);
 -      perm_bond_diff = compare_ether_addr_64bits(slave->perm_hwaddr,
 -                                                 bond->dev->dev_addr);
 +      perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
 +                                                slave->dev->dev_addr);
 +      perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
 +                                                bond->dev->dev_addr);
  
        if (perm_curr_diff && perm_bond_diff) {
                struct slave *tmp_slave;
                int i, found = 0;
  
                bond_for_each_slave(bond, tmp_slave, i) {
 -                      if (!compare_ether_addr_64bits(slave->perm_hwaddr,
 -                                                     tmp_slave->dev->dev_addr)) {
 +                      if (ether_addr_equal_64bits(slave->perm_hwaddr,
 +                                                  tmp_slave->dev->dev_addr)) {
                                found = 1;
                                break;
                        }
@@@ -1074,10 -1076,10 +1076,10 @@@ static int alb_handle_addr_collision_on
         * check uniqueness of slave's mac address against the other
         * slaves in the bond.
         */
 -      if (compare_ether_addr_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
 +      if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
                bond_for_each_slave(bond, tmp_slave1, i) {
 -                      if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr,
 -                                                     slave->dev->dev_addr)) {
 +                      if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
 +                                                  slave->dev->dev_addr)) {
                                found = 1;
                                break;
                        }
        bond_for_each_slave(bond, tmp_slave1, i) {
                found = 0;
                bond_for_each_slave(bond, tmp_slave2, j) {
 -                      if (!compare_ether_addr_64bits(tmp_slave1->perm_hwaddr,
 -                                                     tmp_slave2->dev->dev_addr)) {
 +                      if (ether_addr_equal_64bits(tmp_slave1->perm_hwaddr,
 +                                                  tmp_slave2->dev->dev_addr)) {
                                found = 1;
                                break;
                        }
                }
  
                if (!has_bond_addr) {
 -                      if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr,
 -                                                     bond->dev->dev_addr)) {
 +                      if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
 +                                                  bond->dev->dev_addr)) {
  
                                has_bond_addr = tmp_slave1;
                        }
@@@ -1257,7 -1259,7 +1259,7 @@@ int bond_alb_xmit(struct sk_buff *skb, 
        case ETH_P_IP: {
                const struct iphdr *iph = ip_hdr(skb);
  
 -              if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast) ||
 +              if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
                    (iph->daddr == ip_bcast) ||
                    (iph->protocol == IPPROTO_IGMP)) {
                        do_tx_balance = 0;
                /* IPv6 doesn't really use broadcast mac address, but leave
                 * that here just in case.
                 */
 -              if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast)) {
 +              if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
                        do_tx_balance = 0;
                        break;
                }
                /* IPv6 uses all-nodes multicast as an equivalent to
                 * broadcasts in IPv4.
                 */
 -              if (!compare_ether_addr_64bits(eth_data->h_dest, mac_v6_allmcast)) {
 +              if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
                        do_tx_balance = 0;
                        break;
                }
@@@ -1603,8 -1605,8 +1605,8 @@@ void bond_alb_handle_active_change(stru
                struct slave *tmp_slave;
                /* find slave that is holding the bond's mac address */
                bond_for_each_slave(bond, tmp_slave, i) {
 -                      if (!compare_ether_addr_64bits(tmp_slave->dev->dev_addr,
 -                                                     bond->dev->dev_addr)) {
 +                      if (ether_addr_equal_64bits(tmp_slave->dev->dev_addr,
 +                                                  bond->dev->dev_addr)) {
                                swap_slave = tmp_slave;
                                break;
                        }
@@@ -1681,8 -1683,8 +1683,8 @@@ int bond_alb_set_mac_address(struct net
        swap_slave = NULL;
  
        bond_for_each_slave(bond, slave, i) {
 -              if (!compare_ether_addr_64bits(slave->dev->dev_addr,
 -                                             bond_dev->dev_addr)) {
 +              if (ether_addr_equal_64bits(slave->dev->dev_addr,
 +                                          bond_dev->dev_addr)) {
                        swap_slave = slave;
                        break;
                }
index bbb004354bbd27afa499a5cb5255627da9d46424,bc13b3d774329c5168bc77a44bb7299056b52947..2ee8cf9e8a3b9fe8e728e1bc6d2334793712deb6
@@@ -549,9 -549,9 +549,9 @@@ down
   * Get link speed and duplex from the slave's base driver
   * using ethtool. If for some reason the call fails or the
   * values are invalid, set speed and duplex to -1,
 - * and return error.
 + * and return.
   */
 -static int bond_update_speed_duplex(struct slave *slave)
 +static void bond_update_speed_duplex(struct slave *slave)
  {
        struct net_device *slave_dev = slave->dev;
        struct ethtool_cmd ecmd;
  
        res = __ethtool_get_settings(slave_dev, &ecmd);
        if (res < 0)
 -              return -1;
 +              return;
  
        slave_speed = ethtool_cmd_speed(&ecmd);
        if (slave_speed == 0 || slave_speed == ((__u32) -1))
 -              return -1;
 +              return;
  
        switch (ecmd.duplex) {
        case DUPLEX_FULL:
        case DUPLEX_HALF:
                break;
        default:
 -              return -1;
 +              return;
        }
  
        slave->speed = slave_speed;
        slave->duplex = ecmd.duplex;
  
 -      return 0;
 +      return;
  }
  
  /*
@@@ -1444,8 -1444,9 +1444,9 @@@ static rx_handler_result_t bond_handle_
        struct sk_buff *skb = *pskb;
        struct slave *slave;
        struct bonding *bond;
-       void (*recv_probe)(struct sk_buff *, struct bonding *,
+       int (*recv_probe)(struct sk_buff *, struct bonding *,
                                struct slave *);
+       int ret = RX_HANDLER_ANOTHER;
  
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
  
                if (likely(nskb)) {
-                       recv_probe(nskb, bond, slave);
+                       ret = recv_probe(nskb, bond, slave);
                        dev_kfree_skb(nskb);
+                       if (ret == RX_HANDLER_CONSUMED) {
+                               consume_skb(skb);
+                               return ret;
+                       }
                }
        }
  
                memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
        }
  
-       return RX_HANDLER_ANOTHER;
+       return ret;
  }
  
  /* enslave device <slave> to bond device <master> */
@@@ -1726,8 -1731,7 +1731,8 @@@ int bond_enslave(struct net_device *bon
  
        read_lock(&bond->lock);
  
 -      new_slave->last_arp_rx = jiffies;
 +      new_slave->last_arp_rx = jiffies -
 +              (msecs_to_jiffies(bond->params.arp_interval) + 1);
  
        if (bond->params.miimon && !bond->params.use_carrier) {
                link_reporting = bond_check_dev_link(bond, slave_dev, 1);
        }
  
        /* check for initial state */
 -      if (!bond->params.miimon ||
 -          (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
 -              if (bond->params.updelay) {
 -                      pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n");
 -                      new_slave->link  = BOND_LINK_BACK;
 -                      new_slave->delay = bond->params.updelay;
 +      if (bond->params.miimon) {
 +              if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
 +                      if (bond->params.updelay) {
 +                              new_slave->link = BOND_LINK_BACK;
 +                              new_slave->delay = bond->params.updelay;
 +                      } else {
 +                              new_slave->link = BOND_LINK_UP;
 +                      }
                } else {
 -                      pr_debug("Initial state of slave_dev is BOND_LINK_UP\n");
 -                      new_slave->link  = BOND_LINK_UP;
 +                      new_slave->link = BOND_LINK_DOWN;
                }
 -              new_slave->jiffies = jiffies;
 +      } else if (bond->params.arp_interval) {
 +              new_slave->link = (netif_carrier_ok(slave_dev) ?
 +                      BOND_LINK_UP : BOND_LINK_DOWN);
        } else {
 -              pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n");
 -              new_slave->link  = BOND_LINK_DOWN;
 +              new_slave->link = BOND_LINK_UP;
        }
  
 +      if (new_slave->link != BOND_LINK_DOWN)
 +              new_slave->jiffies = jiffies;
 +      pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
 +              new_slave->link == BOND_LINK_DOWN ? "DOWN" :
 +                      (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 +
        bond_update_speed_duplex(new_slave);
  
        if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
@@@ -1961,7 -1957,7 +1966,7 @@@ int bond_release(struct net_device *bon
        write_lock_bh(&bond->lock);
  
        if (!bond->params.fail_over_mac) {
 -              if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
 +              if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond->slave_cnt > 1)
                        pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
                                   bond_dev->name, slave_dev->name,
@@@ -2732,7 -2728,7 +2737,7 @@@ static void bond_validate_arp(struct bo
        }
  }
  
- static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
+ static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave)
  {
        struct arphdr *arp;
        __be32 sip, tip;
  
        if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
-               return;
+               return RX_HANDLER_ANOTHER;
  
        read_lock(&bond->lock);
  
  
  out_unlock:
        read_unlock(&bond->lock);
+       return RX_HANDLER_ANOTHER;
  }
  
  /*
@@@ -4829,9 -4826,12 +4835,9 @@@ static int bond_validate(struct nlattr 
        return 0;
  }
  
 -static int bond_get_tx_queues(struct net *net, struct nlattr *tb[],
 -                            unsigned int *num_queues,
 -                            unsigned int *real_num_queues)
 +static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
  {
 -      *num_queues = tx_queues;
 -      return 0;
 +      return tx_queues;
  }
  
  static struct rtnl_link_ops bond_link_ops __read_mostly = {
index ba21f9c72a21deef9ca15e66e91e90aa3ebd99d3,8683ca4748c882130cab85dc1fd7433cd2ce4375..9bbf1a275947c1de3625736751bc4e96d3a5e240
@@@ -60,8 -60,8 +60,8 @@@
  #include "igb.h"
  
  #define MAJ 3
 -#define MIN 2
 -#define BUILD 10
 +#define MIN 4
 +#define BUILD 7
  #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
  __stringify(BUILD) "-k"
  char igb_driver_name[] = "igb";
@@@ -75,11 -75,6 +75,11 @@@ static const struct e1000_info *igb_inf
  };
  
  static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@@ -119,6 -114,7 +119,6 @@@ static void igb_free_all_rx_resources(s
  static void igb_setup_mrqc(struct igb_adapter *);
  static int igb_probe(struct pci_dev *, const struct pci_device_id *);
  static void __devexit igb_remove(struct pci_dev *pdev);
 -static void igb_init_hw_timer(struct igb_adapter *adapter);
  static int igb_sw_init(struct igb_adapter *);
  static int igb_open(struct net_device *);
  static int igb_close(struct net_device *);
@@@ -569,6 -565,33 +569,6 @@@ exit
        return;
  }
  
 -
 -/**
 - * igb_read_clock - read raw cycle counter (to be used by time counter)
 - */
 -static cycle_t igb_read_clock(const struct cyclecounter *tc)
 -{
 -      struct igb_adapter *adapter =
 -              container_of(tc, struct igb_adapter, cycles);
 -      struct e1000_hw *hw = &adapter->hw;
 -      u64 stamp = 0;
 -      int shift = 0;
 -
 -      /*
 -       * The timestamp latches on lowest register read. For the 82580
 -       * the lowest register is SYSTIMR instead of SYSTIML.  However we never
 -       * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
 -       */
 -      if (hw->mac.type >= e1000_82580) {
 -              stamp = rd32(E1000_SYSTIMR) >> 8;
 -              shift = IGB_82580_TSYNC_SHIFT;
 -      }
 -
 -      stamp |= (u64)rd32(E1000_SYSTIML) << shift;
 -      stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
 -      return stamp;
 -}
 -
  /**
   * igb_get_hw_dev - return device
   * used by hardware layer to print debugging information
@@@ -646,8 -669,6 +646,8 @@@ static void igb_cache_ring_register(str
        case e1000_82575:
        case e1000_82580:
        case e1000_i350:
 +      case e1000_i210:
 +      case e1000_i211:
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@@ -734,11 -755,8 +734,11 @@@ static int igb_alloc_queues(struct igb_
                if (adapter->hw.mac.type >= e1000_82576)
                        set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
  
 -              /* On i350, loopback VLAN packets have the tag byte-swapped. */
 -              if (adapter->hw.mac.type == e1000_i350)
 +              /*
 +               * On i350, i210, and i211, loopback VLAN packets
 +               * have the tag byte-swapped.
 +               * */
 +              if (adapter->hw.mac.type >= e1000_i350)
                        set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
  
                adapter->rx_ring[i] = ring;
@@@ -832,8 -850,6 +832,8 @@@ static void igb_assign_vector(struct ig
                break;
        case e1000_82580:
        case e1000_i350:
 +      case e1000_i210:
 +      case e1000_i211:
                /*
                 * On 82580 and newer adapters the scheme is similar to 82576
                 * however instead of ordering column-major we have things
@@@ -900,8 -916,6 +900,8 @@@ static void igb_configure_msix(struct i
        case e1000_82576:
        case e1000_82580:
        case e1000_i350:
 +      case e1000_i210:
 +      case e1000_i211:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@@ -1048,11 -1062,6 +1048,11 @@@ static int igb_set_interrupt_capability
        if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
                numvecs += adapter->num_tx_queues;
  
 +      /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
 +      if ((adapter->hw.mac.type == e1000_i210)
 +              || (adapter->hw.mac.type == e1000_i211))
 +              numvecs = 4;
 +
        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;
  
        numvecs++;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
 +
        if (!adapter->msix_entries)
                goto msi_only;
  
@@@ -1103,9 -1111,12 +1103,12 @@@ msi_only
                adapter->flags |= IGB_FLAG_HAS_MSI;
  out:
        /* Notify the stack of the (possibly) reduced queue counts. */
+       rtnl_lock();
        netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-       return netif_set_real_num_rx_queues(adapter->netdev,
-                                           adapter->num_rx_queues);
+       err = netif_set_real_num_rx_queues(adapter->netdev,
+               adapter->num_rx_queues);
+       rtnl_unlock();
+       return err;
  }
  
  /**
@@@ -1651,8 -1662,6 +1654,8 @@@ void igb_reset(struct igb_adapter *adap
                pba &= E1000_RXPBS_SIZE_MASK_82576;
                break;
        case e1000_82575:
 +      case e1000_i210:
 +      case e1000_i211:
        default:
                pba = E1000_PBA_34K;
                break;
        if (hw->mac.ops.init_hw(hw))
                dev_err(&pdev->dev, "Hardware Error\n");
  
 +      /*
 +       * Flow control settings reset on hardware reset, so guarantee flow
 +       * control is off when forcing speed.
 +       */
 +      if (!hw->mac.autoneg)
 +              igb_force_mac_fc(hw);
 +
        igb_init_dmac(adapter, pba);
        if (!netif_running(adapter->netdev))
                igb_power_down_link(adapter);
@@@ -1848,7 -1850,7 +1851,7 @@@ static int __devinit igb_probe(struct p
         */
        if (pdev->is_virtfn) {
                WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
 -                   pci_name(pdev), pdev->vendor, pdev->device);
 +                      pci_name(pdev), pdev->vendor, pdev->device);
                return -EINVAL;
        }
  
         * known good starting state */
        hw->mac.ops.reset_hw(hw);
  
 -      /* make sure the NVM is good */
 -      if (hw->nvm.ops.validate(hw) < 0) {
 -              dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
 -              err = -EIO;
 -              goto err_eeprom;
 +      /*
 +       * make sure the NVM is good , i211 parts have special NVM that
 +       * doesn't contain a checksum
 +       */
 +      if (hw->mac.type != e1000_i211) {
 +              if (hw->nvm.ops.validate(hw) < 0) {
 +                      dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
 +                      err = -EIO;
 +                      goto err_eeprom;
 +              }
        }
  
        /* copy the MAC address out of the NVM */
        }
  
  #endif
 +#ifdef CONFIG_IGB_PTP
        /* do hw tstamp init after resetting */
 -      igb_init_hw_timer(adapter);
 +      igb_ptp_init(adapter);
  
 +#endif
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
                adapter->num_rx_queues, adapter->num_tx_queues);
        switch (hw->mac.type) {
        case e1000_i350:
 +      case e1000_i210:
 +      case e1000_i211:
                igb_set_eee_i350(hw);
                break;
        default:
@@@ -2194,10 -2187,7 +2197,10 @@@ static void __devexit igb_remove(struc
        struct e1000_hw *hw = &adapter->hw;
  
        pm_runtime_get_noresume(&pdev->dev);
 +#ifdef CONFIG_IGB_PTP
 +      igb_ptp_remove(adapter);
  
 +#endif
        /*
         * The watchdog timer may be rescheduled, so explicitly
         * disable watchdog from being rescheduled.
@@@ -2273,14 -2263,9 +2276,14 @@@ static void __devinit igb_probe_vfs(str
  {
  #ifdef CONFIG_PCI_IOV
        struct pci_dev *pdev = adapter->pdev;
 +      struct e1000_hw *hw = &adapter->hw;
        int old_vfs = igb_find_enabled_vfs(adapter);
        int i;
  
 +      /* Virtualization features not supported on i210 family. */
 +      if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
 +              return;
 +
        if (old_vfs) {
                dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
                         "max_vfs setting of %d\n", old_vfs, max_vfs);
  
        adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
                                sizeof(struct vf_data_storage), GFP_KERNEL);
 +
        /* if allocation failed then we do not support SR-IOV */
        if (!adapter->vf_data) {
                adapter->vfs_allocated_count = 0;
  #endif /* CONFIG_PCI_IOV */
  }
  
 -/**
 - * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 - * @adapter: board private structure to initialize
 - *
 - * igb_init_hw_timer initializes the function pointer and values for the hw
 - * timer found in hardware.
 - **/
 -static void igb_init_hw_timer(struct igb_adapter *adapter)
 -{
 -      struct e1000_hw *hw = &adapter->hw;
 -
 -      switch (hw->mac.type) {
 -      case e1000_i350:
 -      case e1000_82580:
 -              memset(&adapter->cycles, 0, sizeof(adapter->cycles));
 -              adapter->cycles.read = igb_read_clock;
 -              adapter->cycles.mask = CLOCKSOURCE_MASK(64);
 -              adapter->cycles.mult = 1;
 -              /*
 -               * The 82580 timesync updates the system timer every 8ns by 8ns
 -               * and the value cannot be shifted.  Instead we need to shift
 -               * the registers to generate a 64bit timer value.  As a result
 -               * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
 -               * 24 in order to generate a larger value for synchronization.
 -               */
 -              adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
 -              /* disable system timer temporarily by setting bit 31 */
 -              wr32(E1000_TSAUXC, 0x80000000);
 -              wrfl();
 -
 -              /* Set registers so that rollover occurs soon to test this. */
 -              wr32(E1000_SYSTIMR, 0x00000000);
 -              wr32(E1000_SYSTIML, 0x80000000);
 -              wr32(E1000_SYSTIMH, 0x000000FF);
 -              wrfl();
 -
 -              /* enable system timer by clearing bit 31 */
 -              wr32(E1000_TSAUXC, 0x0);
 -              wrfl();
 -
 -              timecounter_init(&adapter->clock,
 -                               &adapter->cycles,
 -                               ktime_to_ns(ktime_get_real()));
 -              /*
 -               * Synchronize our NIC clock against system wall clock. NIC
 -               * time stamp reading requires ~3us per sample, each sample
 -               * was pretty stable even under load => only require 10
 -               * samples for each offset comparison.
 -               */
 -              memset(&adapter->compare, 0, sizeof(adapter->compare));
 -              adapter->compare.source = &adapter->clock;
 -              adapter->compare.target = ktime_get_real;
 -              adapter->compare.num_samples = 10;
 -              timecompare_update(&adapter->compare, 0);
 -              break;
 -      case e1000_82576:
 -              /*
 -               * Initialize hardware timer: we keep it running just in case
 -               * that some program needs it later on.
 -               */
 -              memset(&adapter->cycles, 0, sizeof(adapter->cycles));
 -              adapter->cycles.read = igb_read_clock;
 -              adapter->cycles.mask = CLOCKSOURCE_MASK(64);
 -              adapter->cycles.mult = 1;
 -              /**
 -               * Scale the NIC clock cycle by a large factor so that
 -               * relatively small clock corrections can be added or
 -               * subtracted at each clock tick. The drawbacks of a large
 -               * factor are a) that the clock register overflows more quickly
 -               * (not such a big deal) and b) that the increment per tick has
 -               * to fit into 24 bits.  As a result we need to use a shift of
 -               * 19 so we can fit a value of 16 into the TIMINCA register.
 -               */
 -              adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
 -              wr32(E1000_TIMINCA,
 -                              (1 << E1000_TIMINCA_16NS_SHIFT) |
 -                              (16 << IGB_82576_TSYNC_SHIFT));
 -
 -              /* Set registers so that rollover occurs soon to test this. */
 -              wr32(E1000_SYSTIML, 0x00000000);
 -              wr32(E1000_SYSTIMH, 0xFF800000);
 -              wrfl();
 -
 -              timecounter_init(&adapter->clock,
 -                               &adapter->cycles,
 -                               ktime_to_ns(ktime_get_real()));
 -              /*
 -               * Synchronize our NIC clock against system wall clock. NIC
 -               * time stamp reading requires ~3us per sample, each sample
 -               * was pretty stable even under load => only require 10
 -               * samples for each offset comparison.
 -               */
 -              memset(&adapter->compare, 0, sizeof(adapter->compare));
 -              adapter->compare.source = &adapter->clock;
 -              adapter->compare.target = ktime_get_real;
 -              adapter->compare.num_samples = 10;
 -              timecompare_update(&adapter->compare, 0);
 -              break;
 -      case e1000_82575:
 -              /* 82575 does not support timesync */
 -      default:
 -              break;
 -      }
 -
 -}
 -
  /**
   * igb_sw_init - Initialize general software structures (struct igb_adapter)
   * @adapter: board private structure to initialize
@@@ -2367,28 -2457,11 +2370,28 @@@ static int __devinit igb_sw_init(struc
                } else
                        adapter->vfs_allocated_count = max_vfs;
                break;
 +      case e1000_i210:
 +      case e1000_i211:
 +              adapter->vfs_allocated_count = 0;
 +              break;
        default:
                break;
        }
  #endif /* CONFIG_PCI_IOV */
 -      adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 +      switch (hw->mac.type) {
 +      case e1000_i210:
 +              adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
 +                      num_online_cpus());
 +              break;
 +      case e1000_i211:
 +              adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
 +                      num_online_cpus());
 +              break;
 +      default:
 +              adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
 +              num_online_cpus());
 +              break;
 +      }
        /* i350 cannot do RSS and SR-IOV at the same time */
        if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
                adapter->rss_queues = 1;
        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);
  
 -      if (hw->mac.type == e1000_i350)
 +      if (hw->mac.type >= e1000_i350)
                adapter->flags &= ~IGB_FLAG_DMAC;
  
        set_bit(__IGB_DOWN, &adapter->state);
@@@ -2871,17 -2944,6 +2874,17 @@@ static void igb_setup_mrqc(struct igb_a
  
        /* Don't need to set TUOFL or IPOFL, they default to 1 */
        wr32(E1000_RXCSUM, rxcsum);
 +      /*
 +       * Generate RSS hash based on TCP port numbers and/or
 +       * IPv4/v6 src and dst addresses since UDP cannot be
 +       * hashed reliably due to IP fragmentation
 +       */
 +
 +      mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
 +             E1000_MRQC_RSS_FIELD_IPV4_TCP |
 +             E1000_MRQC_RSS_FIELD_IPV6 |
 +             E1000_MRQC_RSS_FIELD_IPV6_TCP |
 +             E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
  
        /* If VMDq is enabled then we set the appropriate mode for that, else
         * we default to RSS so that an RSS hash is calculated per packet even
                        wr32(E1000_VT_CTL, vtctl);
                }
                if (adapter->rss_queues > 1)
 -                      mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
 +                      mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
                else
 -                      mrqc = E1000_MRQC_ENABLE_VMDQ;
 +                      mrqc |= E1000_MRQC_ENABLE_VMDQ;
        } else {
 -              mrqc = E1000_MRQC_ENABLE_RSS_4Q;
 +              if (hw->mac.type != e1000_i211)
 +                      mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
        }
        igb_vmm_control(adapter);
  
 -      /*
 -       * Generate RSS hash based on TCP port numbers and/or
 -       * IPv4/v6 src and dst addresses since UDP cannot be
 -       * hashed reliably due to IP fragmentation
 -       */
 -      mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
 -              E1000_MRQC_RSS_FIELD_IPV4_TCP |
 -              E1000_MRQC_RSS_FIELD_IPV6 |
 -              E1000_MRQC_RSS_FIELD_IPV6_TCP |
 -              E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
 -
        wr32(E1000_MRQC, mrqc);
  }
  
@@@ -3507,7 -3579,7 +3510,7 @@@ static void igb_set_rx_mode(struct net_
         * we will have issues with VLAN tag stripping not being done for frames
         * that are only arriving because we are the default pool
         */
 -      if (hw->mac.type < e1000_82576)
 +      if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
                return;
  
        vmolr |= rd32(E1000_VMOLR(vfn)) &
@@@ -3604,7 -3676,7 +3607,7 @@@ static bool igb_thermal_sensor_event(st
        bool ret = false;
        u32 ctrl_ext, thstat;
  
 -      /* check for thermal sensor event on i350, copper only */
 +      /* check for thermal sensor event on i350 copper only */
        if (hw->mac.type == e1000_i350) {
                thstat = rd32(E1000_THSTAT);
                ctrl_ext = rd32(E1000_CTRL_EXT);
@@@ -5649,7 -5721,35 +5652,7 @@@ static int igb_poll(struct napi_struct 
        return 0;
  }
  
 -/**
 - * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 - * @adapter: board private structure
 - * @shhwtstamps: timestamp structure to update
 - * @regval: unsigned 64bit system time value.
 - *
 - * We need to convert the system time value stored in the RX/TXSTMP registers
 - * into a hwtstamp which can be used by the upper level timestamping functions
 - */
 -static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
 -                                   struct skb_shared_hwtstamps *shhwtstamps,
 -                                   u64 regval)
 -{
 -      u64 ns;
 -
 -      /*
 -       * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
 -       * 24 to match clock shift we setup earlier.
 -       */
 -      if (adapter->hw.mac.type >= e1000_82580)
 -              regval <<= IGB_82580_TSYNC_SHIFT;
 -
 -      ns = timecounter_cyc2time(&adapter->clock, regval);
 -      timecompare_update(&adapter->compare, ns);
 -      memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
 -      shhwtstamps->hwtstamp = ns_to_ktime(ns);
 -      shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
 -}
 -
 +#ifdef CONFIG_IGB_PTP
  /**
   * igb_tx_hwtstamp - utility function which checks for TX time stamp
   * @q_vector: pointer to q_vector containing needed info
@@@ -5679,7 -5779,6 +5682,7 @@@ static void igb_tx_hwtstamp(struct igb_
        skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
  }
  
 +#endif
  /**
   * igb_clean_tx_irq - Reclaim resources after transmit completes
   * @q_vector: pointer to q_vector containing needed info
@@@ -5723,11 -5822,9 +5726,11 @@@ static bool igb_clean_tx_irq(struct igb
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;
  
 +#ifdef CONFIG_IGB_PTP
                /* retrieve hardware timestamp */
                igb_tx_hwtstamp(q_vector, tx_buffer);
  
 +#endif
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
                tx_buffer->skb = NULL;
@@@ -5899,7 -5996,6 +5902,7 @@@ static inline void igb_rx_hash(struct i
                skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
  }
  
 +#ifdef CONFIG_IGB_PTP
  static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
                            union e1000_adv_rx_desc *rx_desc,
                            struct sk_buff *skb)
        igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
  }
  
 +#endif
  static void igb_rx_vlan(struct igb_ring *ring,
                        union e1000_adv_rx_desc *rx_desc,
                        struct sk_buff *skb)
@@@ -6050,9 -6145,7 +6053,9 @@@ static bool igb_clean_rx_irq(struct igb
                        goto next_desc;
                }
  
 +#ifdef CONFIG_IGB_PTP
                igb_rx_hwtstamp(q_vector, rx_desc, skb);
 +#endif
                igb_rx_hash(rx_ring, rx_desc, skb);
                igb_rx_checksum(rx_ring, rx_desc, skb);
                igb_rx_vlan(rx_ring, rx_desc, skb);
@@@ -6706,18 -6799,7 +6709,7 @@@ static int igb_resume(struct device *de
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
  
-       if (!rtnl_is_locked()) {
-               /*
-                * shut up ASSERT_RTNL() warning in
-                * netif_set_real_num_tx/rx_queues.
-                */
-               rtnl_lock();
-               err = igb_init_interrupt_scheme(adapter);
-               rtnl_unlock();
-       } else {
-               err = igb_init_interrupt_scheme(adapter);
-       }
-       if (err) {
+       if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
@@@ -7080,8 -7162,6 +7072,8 @@@ static void igb_vmm_control(struct igb_
  
        switch (hw->mac.type) {
        case e1000_82575:
 +      case e1000_i210:
 +      case e1000_i211:
        default:
                /* replication is not supported for 82575 */
                return;
index 9f3dbc4feadc4c24463060df977ed4e7a036e9c8,ba781747d17421d7a0fff044b74c79303e6a8fa3..b07311eaa69386d3cdbad86537da599b5dcaccb7
@@@ -584,7 -584,6 +584,6 @@@ struct pch_gbe_hw_stats 
  /**
   * struct pch_gbe_adapter - board specific private data structure
   * @stats_lock:       Spinlock structure for status
-  * @tx_queue_lock:    Spinlock structure for transmit
   * @ethtool_lock:     Spinlock structure for ethtool
   * @irq_sem:          Semaphore for interrupt
   * @netdev:           Pointer of network device structure
  
  struct pch_gbe_adapter {
        spinlock_t stats_lock;
-       spinlock_t tx_queue_lock;
        spinlock_t ethtool_lock;
        atomic_t irq_sem;
        struct net_device *netdev;
@@@ -660,7 -658,6 +658,7 @@@ extern u32 pch_src_uuid_lo_read(struct 
  extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
  extern u64 pch_rx_snap_read(struct pci_dev *pdev);
  extern u64 pch_tx_snap_read(struct pci_dev *pdev);
 +extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
  #endif
  
  /* pch_gbe_param.c */
index 9dc7e50236711364e0dbdf50a6f1c6b9ecbfbd32,1e38d502a06202d381b14d50964712896da4c67e..3787c64ee71cd1b4b7dfb269b7b3f0cb6e0cffc9
@@@ -79,6 -79,7 +79,6 @@@ const char pch_driver_version[] = DRV_V
  #define       PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
  #define       PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
  
 -#define PCH_GBE_ETH_ALEN            6
  
  /* This defines the bits that are set in the Interrupt Mask
   * Set/Read Register.  Each bit is documented below:
  
  #ifdef CONFIG_PCH_PTP
  /* Macros for ieee1588 */
 -#define TICKS_NS_SHIFT  5
 -
  /* 0x40 Time Synchronization Channel Control Register Bits */
  #define MASTER_MODE   (1<<0)
 -#define SLAVE_MODE    (0<<0)
 +#define SLAVE_MODE    (0)
  #define V2_MODE       (1<<31)
 -#define CAP_MODE0     (0<<16)
 +#define CAP_MODE0     (0)
  #define CAP_MODE2     (1<<17)
  
  /* 0x44 Time Synchronization Channel Event Register Bits */
  #define TX_SNAPSHOT_LOCKED (1<<0)
  #define RX_SNAPSHOT_LOCKED (1<<1)
 +
 +#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
 +#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
  #endif
  
  static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
  static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
  static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                               int data);
 +static void pch_gbe_set_multi(struct net_device *netdev);
  
  #ifdef CONFIG_PCH_PTP
  static struct sock_filter ptp_filter[] = {
@@@ -134,8 -133,10 +134,8 @@@ static int pch_ptp_match(struct sk_buf
        u16 *hi, *id;
        u32 lo;
  
 -      if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
 -              (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
 +      if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
                return 0;
 -      }
  
        offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
  
                seqid  == *id);
  }
  
 -static void pch_rx_timestamp(
 -                      struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 +static void
 +pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
  {
        struct skb_shared_hwtstamps *shhwtstamps;
        struct pci_dev *pdev;
                goto out;
  
        ns = pch_rx_snap_read(pdev);
 -      ns <<= TICKS_NS_SHIFT;
  
        shhwtstamps = skb_hwtstamps(skb);
        memset(shhwtstamps, 0, sizeof(*shhwtstamps));
@@@ -190,8 -192,8 +190,8 @@@ out
        pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
  }
  
 -static void pch_tx_timestamp(
 -                      struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 +static void
 +pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
  {
        struct skb_shared_hwtstamps shhwtstamps;
        struct pci_dev *pdev;
        u32 cnt, val;
  
        shtx = skb_shinfo(skb);
 -      if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
 -              shtx->tx_flags |= SKBTX_IN_PROGRESS;
 -      else
 +      if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
                return;
  
 +      shtx->tx_flags |= SKBTX_IN_PROGRESS;
 +
        /* Get ieee1588's dev information */
        pdev = adapter->ptp_pdev;
  
        /*
         * This really stinks, but we have to poll for the Tx time stamp.
 -       * Usually, the time stamp is ready after 4 to 6 microseconds.
         */
        for (cnt = 0; cnt < 100; cnt++) {
                val = pch_ch_event_read(pdev);
        }
  
        ns = pch_tx_snap_read(pdev);
 -      ns <<= TICKS_NS_SHIFT;
  
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
@@@ -236,7 -240,6 +236,7 @@@ static int hwtstamp_ioctl(struct net_de
        struct hwtstamp_config cfg;
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev;
 +      u8 station[20];
  
        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
                return -EFAULT;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                adapter->hwts_rx_en = 0;
 -              pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
 +              pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                adapter->hwts_rx_en = 1;
 -              pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
 +              pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
 +              break;
 +      case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 +              adapter->hwts_rx_en = 1;
 +              pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
 +              strcpy(station, PTP_L4_MULTICAST_SA);
 +              pch_set_station_address(station, pdev);
                break;
 -      case HWTSTAMP_FILTER_PTP_V2_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                adapter->hwts_rx_en = 1;
 -              pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
 +              pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
 +              strcpy(station, PTP_L2_MULTICAST_SA);
 +              pch_set_station_address(station, pdev);
                break;
        default:
                return -ERANGE;
@@@ -404,18 -399,18 +404,18 @@@ static void pch_gbe_mac_reset_hw(struc
        iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
  #endif
        pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
 -      /* Setup the receive address */
 +      /* Setup the receive addresses */
        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
        return;
  }
  
  static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
  {
 -      /* Read the MAC address. and store to the private data */
 +      /* Read the MAC addresses. and store to the private data */
        pch_gbe_mac_read_mac_addr(hw);
        iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
        pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
 -      /* Setup the MAC address */
 +      /* Setup the MAC addresses */
        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
        return;
  }
@@@ -465,7 -460,7 +465,7 @@@ static void pch_gbe_mac_mc_addr_list_up
                if (mc_addr_count) {
                        pch_gbe_mac_mar_set(hw, mc_addr_list, i);
                        mc_addr_count--;
 -                      mc_addr_list += PCH_GBE_ETH_ALEN;
 +                      mc_addr_list += ETH_ALEN;
                } else {
                        /* Clear MAC address mask */
                        adrmask = ioread32(&hw->reg->ADDR_MASK);
@@@ -645,14 -640,11 +645,11 @@@ static void pch_gbe_mac_set_pause_packe
   */
  static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
  {
-       int size;
-       size = (int)sizeof(struct pch_gbe_tx_ring);
-       adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+       adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
-       size = (int)sizeof(struct pch_gbe_rx_ring);
-       adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+       adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
@@@ -783,8 -775,6 +780,8 @@@ void pch_gbe_reinit_locked(struct pch_g
  void pch_gbe_reset(struct pch_gbe_adapter *adapter)
  {
        pch_gbe_mac_reset_hw(&adapter->hw);
 +      /* reprogram multicast address register after reset */
 +      pch_gbe_set_multi(adapter->netdev);
        /* Setup the receive address. */
        pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
        if (pch_gbe_hal_init_hw(&adapter->hw))
@@@ -1169,7 -1159,6 +1166,6 @@@ static void pch_gbe_tx_queue(struct pch
        struct sk_buff *tmp_skb;
        unsigned int frame_ctrl;
        unsigned int ring_num;
-       unsigned long flags;
  
        /*-- Set frame control --*/
        frame_ctrl = 0;
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        unsigned int offset;
 -                      iph->check = 0;
 -                      iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
                        offset = skb_transport_offset(skb);
                        if (iph->protocol == IPPROTO_TCP) {
                                skb->csum = 0;
                        }
                }
        }
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
        ring_num = tx_ring->next_to_use;
        if (unlikely((ring_num + 1) == tx_ring->count))
                tx_ring->next_to_use = 0;
        else
                tx_ring->next_to_use = ring_num + 1;
  
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        buffer_info = &tx_ring->buffer_info[ring_num];
        tmp_skb = buffer_info->skb;
  
@@@ -1347,8 -1338,6 +1343,8 @@@ static void pch_gbe_stop_receive(struc
                /* Stop Receive */
                pch_gbe_mac_reset_rx(hw);
        }
 +      /* reprogram multicast address register after reset */
 +      pch_gbe_set_multi(adapter->netdev);
  }
  
  static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
@@@ -1525,7 -1514,7 +1521,7 @@@ pch_gbe_alloc_rx_buffers_pool(struct pc
                                                &rx_ring->rx_buff_pool_logic,
                                                GFP_KERNEL);
        if (!rx_ring->rx_buff_pool) {
-               pr_err("Unable to allocate memory for the receive poll buffer\n");
+               pr_err("Unable to allocate memory for the receive pool buffer\n");
                return -ENOMEM;
        }
        memset(rx_ring->rx_buff_pool, 0, size);
@@@ -1644,15 -1633,17 +1640,17 @@@ pch_gbe_clean_tx(struct pch_gbe_adapte
        pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
                 cleaned_count);
        /* Recover from running out of Tx resources in xmit_frame */
+       spin_lock(&tx_ring->tx_lock);
        if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
                netif_wake_queue(adapter->netdev);
                adapter->stats.tx_restart_count++;
                pr_debug("Tx wake queue\n");
        }
-       spin_lock(&adapter->tx_queue_lock);
        tx_ring->next_to_clean = i;
-       spin_unlock(&adapter->tx_queue_lock);
        pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+       spin_unlock(&tx_ring->tx_lock);
        return cleaned;
  }
  
@@@ -1931,6 -1922,7 +1929,6 @@@ static int pch_gbe_request_irq(struct p
  }
  
  
 -static void pch_gbe_set_multi(struct net_device *netdev);
  /**
   * pch_gbe_up - Up GbE network device
   * @adapter:  Board private structure
@@@ -2043,7 -2035,6 +2041,6 @@@ static int pch_gbe_sw_init(struct pch_g
                return -ENOMEM;
        }
        spin_lock_init(&adapter->hw.miim_lock);
-       spin_lock_init(&adapter->tx_queue_lock);
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->ethtool_lock);
        atomic_set(&adapter->irq_sem, 0);
@@@ -2148,10 -2139,10 +2145,10 @@@ static int pch_gbe_xmit_frame(struct sk
                         tx_ring->next_to_use, tx_ring->next_to_clean);
                return NETDEV_TX_BUSY;
        }
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
  
        /* CRC,ITAG no support */
        pch_gbe_tx_queue(adapter, tx_ring, skb);
+       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        return NETDEV_TX_OK;
  }
  
index 00628d84342f1e1196e73bd9fe0ae14465c068c1,ce6b44d1f2529ed82fe61e66582657a671a5385a..4f74b9762c296b81313660b571ff9774a8535ea1
@@@ -44,8 -44,6 +44,8 @@@
  #define FIRMWARE_8168F_1      "rtl_nic/rtl8168f-1.fw"
  #define FIRMWARE_8168F_2      "rtl_nic/rtl8168f-2.fw"
  #define FIRMWARE_8105E_1      "rtl_nic/rtl8105e-1.fw"
 +#define FIRMWARE_8402_1               "rtl_nic/rtl8402-1.fw"
 +#define FIRMWARE_8411_1               "rtl_nic/rtl8411-1.fw"
  
  #ifdef RTL8169_DEBUG
  #define assert(expr) \
  #define R8169_MSG_DEFAULT \
        (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
  
- #define TX_BUFFS_AVAIL(tp) \
-       (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+ #define TX_SLOTS_AVAIL(tp) \
+       (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+ /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+ #define TX_FRAGS_READY_FOR(tp,nr_frags) \
+       (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
  
  /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
     The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@@ -135,8 -137,6 +139,8 @@@ enum mac_version 
        RTL_GIGA_MAC_VER_34,
        RTL_GIGA_MAC_VER_35,
        RTL_GIGA_MAC_VER_36,
 +      RTL_GIGA_MAC_VER_37,
 +      RTL_GIGA_MAC_VER_38,
        RTL_GIGA_MAC_NONE   = 0xff,
  };
  
@@@ -249,12 -249,6 +253,12 @@@ static const struct 
        [RTL_GIGA_MAC_VER_36] =
                _R("RTL8168f/8111f",    RTL_TD_1, FIRMWARE_8168F_2,
                                                        JUMBO_9K, false),
 +      [RTL_GIGA_MAC_VER_37] =
 +              _R("RTL8402",           RTL_TD_1, FIRMWARE_8402_1,
 +                                                      JUMBO_1K, true),
 +      [RTL_GIGA_MAC_VER_38] =
 +              _R("RTL8411",           RTL_TD_1, FIRMWARE_8411_1,
 +                                                      JUMBO_9K, false),
  };
  #undef _R
  
@@@ -325,8 -319,6 +329,8 @@@ enum rtl_registers 
        Config0         = 0x51,
        Config1         = 0x52,
        Config2         = 0x53,
 +#define PME_SIGNAL                    (1 << 5)        /* 8168c and later */
 +
        Config3         = 0x54,
        Config4         = 0x55,
        Config5         = 0x56,
@@@ -367,9 -359,6 +371,9 @@@ enum rtl8168_8101_registers 
  #define       CSIAR_BYTE_ENABLE               0x0f
  #define       CSIAR_BYTE_ENABLE_SHIFT         12
  #define       CSIAR_ADDR_MASK                 0x0fff
 +#define CSIAR_FUNC_CARD                       0x00000000
 +#define CSIAR_FUNC_SDIO                       0x00010000
 +#define CSIAR_FUNC_NIC                        0x00020000
        PMCH                    = 0x6f,
        EPHYAR                  = 0x80,
  #define       EPHYAR_FLAG                     0x80000000
@@@ -731,11 -720,6 +735,11 @@@ struct rtl8169_private 
                void (*disable)(struct rtl8169_private *);
        } jumbo_ops;
  
 +      struct csi_ops {
 +              void (*write)(void __iomem *, int, int);
 +              u32 (*read)(void __iomem *, int);
 +      } csi_ops;
 +
        int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
        int (*get_settings)(struct net_device *, struct ethtool_cmd *);
        void (*phy_reset_enable)(struct rtl8169_private *tp);
@@@ -788,8 -772,6 +792,8 @@@ MODULE_FIRMWARE(FIRMWARE_8168E_3)
  MODULE_FIRMWARE(FIRMWARE_8105E_1);
  MODULE_FIRMWARE(FIRMWARE_8168F_1);
  MODULE_FIRMWARE(FIRMWARE_8168F_2);
 +MODULE_FIRMWARE(FIRMWARE_8402_1);
 +MODULE_FIRMWARE(FIRMWARE_8411_1);
  
  static void rtl_lock_work(struct rtl8169_private *tp)
  {
@@@ -1100,6 -1082,40 +1104,6 @@@ static u16 rtl_ephy_read(void __iomem *
        return value;
  }
  
 -static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
 -{
 -      unsigned int i;
 -
 -      RTL_W32(CSIDR, value);
 -      RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
 -              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
 -
 -      for (i = 0; i < 100; i++) {
 -              if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
 -                      break;
 -              udelay(10);
 -      }
 -}
 -
 -static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
 -{
 -      u32 value = ~0x00;
 -      unsigned int i;
 -
 -      RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
 -              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
 -
 -      for (i = 0; i < 100; i++) {
 -              if (RTL_R32(CSIAR) & CSIAR_FLAG) {
 -                      value = RTL_R32(CSIDR);
 -                      break;
 -              }
 -              udelay(10);
 -      }
 -
 -      return value;
 -}
 -
  static
  void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
  {
@@@ -1269,8 -1285,7 +1273,8 @@@ static void rtl_link_chg_patch(struct r
        if (!netif_running(dev))
                return;
  
 -      if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
 +      if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
 +          tp->mac_version == RTL_GIGA_MAC_VER_38) {
                if (RTL_R8(PHYstatus) & _1000bpsF) {
                        rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
                                      0x00000011, ERIAR_EXGMAC);
                        rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
                                      0x0000003f, ERIAR_EXGMAC);
                }
 +      } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
 +              if (RTL_R8(PHYstatus) & _10bps) {
 +                      rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
 +                                    0x4d02, ERIAR_EXGMAC);
 +                      rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
 +                                    0x0060, ERIAR_EXGMAC);
 +              } else {
 +                      rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
 +                                    0x0000, ERIAR_EXGMAC);
 +              }
        }
  }
  
@@@ -1395,6 -1400,7 +1399,6 @@@ static void __rtl8169_set_wol(struct rt
                u16 reg;
                u8  mask;
        } cfg[] = {
 -              { WAKE_ANY,   Config1, PMEnable },
                { WAKE_PHY,   Config3, LinkUp },
                { WAKE_MAGIC, Config3, MagicPacket },
                { WAKE_UCAST, Config5, UWF },
                { WAKE_MCAST, Config5, MWF },
                { WAKE_ANY,   Config5, LanWake }
        };
 +      u8 options;
  
        RTL_W8(Cfg9346, Cfg9346_Unlock);
  
        for (i = 0; i < ARRAY_SIZE(cfg); i++) {
 -              u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
 +              options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
                if (wolopts & cfg[i].opt)
                        options |= cfg[i].mask;
                RTL_W8(cfg[i].reg, options);
        }
  
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
 +              options = RTL_R8(Config1) & ~PMEnable;
 +              if (wolopts)
 +                      options |= PMEnable;
 +              RTL_W8(Config1, options);
 +              break;
 +      default:
 +              options = RTL_R8(Config2) & ~PME_SIGNAL;
 +              if (wolopts)
 +                      options |= PME_SIGNAL;
 +              RTL_W8(Config2, options);
 +              break;
 +      }
 +
        RTL_W8(Cfg9346, Cfg9346_Lock);
  }
  
@@@ -1867,7 -1857,6 +1871,7 @@@ static const struct ethtool_ops rtl8169
        .get_strings            = rtl8169_get_strings,
        .get_sset_count         = rtl8169_get_sset_count,
        .get_ethtool_stats      = rtl8169_get_ethtool_stats,
 +      .get_ts_info            = ethtool_op_get_ts_info,
  };
  
  static void rtl8169_get_mac_version(struct rtl8169_private *tp,
                int mac_version;
        } mac_info[] = {
                /* 8168F family. */
 +              { 0x7c800000, 0x48800000,       RTL_GIGA_MAC_VER_38 },
                { 0x7cf00000, 0x48100000,       RTL_GIGA_MAC_VER_36 },
                { 0x7cf00000, 0x48000000,       RTL_GIGA_MAC_VER_35 },
  
                { 0x7c800000, 0x30000000,       RTL_GIGA_MAC_VER_11 },
  
                /* 8101 family. */
 +              { 0x7c800000, 0x44000000,       RTL_GIGA_MAC_VER_37 },
                { 0x7cf00000, 0x40b00000,       RTL_GIGA_MAC_VER_30 },
                { 0x7cf00000, 0x40a00000,       RTL_GIGA_MAC_VER_30 },
                { 0x7cf00000, 0x40900000,       RTL_GIGA_MAC_VER_29 },
@@@ -3030,28 -3017,6 +3034,28 @@@ static void rtl8168e_2_hw_phy_config(st
        rtl_writephy(tp, 0x1f, 0x0000);
  }
  
 +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      /* For 4-corner performance improve */
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b80);
 +      rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      /* PHY auto speed down */
 +      rtl_writephy(tp, 0x1f, 0x0007);
 +      rtl_writephy(tp, 0x1e, 0x002d);
 +      rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
 +
 +      /* Improve 10M EEE waveform */
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b86);
 +      rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
  static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
  {
        static const struct phy_reg phy_reg_init[] = {
  
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
  
 -      /* For 4-corner performance improve */
 -      rtl_writephy(tp, 0x1f, 0x0005);
 -      rtl_writephy(tp, 0x05, 0x8b80);
 -      rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
 -      rtl_writephy(tp, 0x1f, 0x0000);
 -
 -      /* PHY auto speed down */
 -      rtl_writephy(tp, 0x1f, 0x0007);
 -      rtl_writephy(tp, 0x1e, 0x002d);
 -      rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
 -      rtl_writephy(tp, 0x1f, 0x0000);
 -      rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
 -
 -      /* Improve 10M EEE waveform */
 -      rtl_writephy(tp, 0x1f, 0x0005);
 -      rtl_writephy(tp, 0x05, 0x8b86);
 -      rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
 -      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl8168f_hw_phy_config(tp);
  
        /* Improve 2-pair detection performance */
        rtl_writephy(tp, 0x1f, 0x0005);
@@@ -3106,104 -3088,23 +3110,104 @@@ static void rtl8168f_2_hw_phy_config(st
  {
        rtl_apply_firmware(tp);
  
 -      /* For 4-corner performance improve */
 +      rtl8168f_hw_phy_config(tp);
 +}
 +
 +static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      static const struct phy_reg phy_reg_init[] = {
 +              /* Channel estimation fine tune */
 +              { 0x1f, 0x0003 },
 +              { 0x09, 0xa20f },
 +              { 0x1f, 0x0000 },
 +
 +              /* Modify green table for giga & fnet */
 +              { 0x1f, 0x0005 },
 +              { 0x05, 0x8b55 },
 +              { 0x06, 0x0000 },
 +              { 0x05, 0x8b5e },
 +              { 0x06, 0x0000 },
 +              { 0x05, 0x8b67 },
 +              { 0x06, 0x0000 },
 +              { 0x05, 0x8b70 },
 +              { 0x06, 0x0000 },
 +              { 0x1f, 0x0000 },
 +              { 0x1f, 0x0007 },
 +              { 0x1e, 0x0078 },
 +              { 0x17, 0x0000 },
 +              { 0x19, 0x00aa },
 +              { 0x1f, 0x0000 },
 +
 +              /* Modify green table for 10M */
 +              { 0x1f, 0x0005 },
 +              { 0x05, 0x8b79 },
 +              { 0x06, 0xaa00 },
 +              { 0x1f, 0x0000 },
 +
 +              /* Disable hiimpedance detection (RTCT) */
 +              { 0x1f, 0x0003 },
 +              { 0x01, 0x328a },
 +              { 0x1f, 0x0000 }
 +      };
 +
 +
 +      rtl_apply_firmware(tp);
 +
 +      rtl8168f_hw_phy_config(tp);
 +
 +      /* Improve 2-pair detection performance */
        rtl_writephy(tp, 0x1f, 0x0005);
 -      rtl_writephy(tp, 0x05, 0x8b80);
 -      rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
 +      rtl_writephy(tp, 0x05, 0x8b85);
 +      rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
        rtl_writephy(tp, 0x1f, 0x0000);
  
 -      /* PHY auto speed down */
 -      rtl_writephy(tp, 0x1f, 0x0007);
 -      rtl_writephy(tp, 0x1e, 0x002d);
 -      rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
 +      rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 +
 +      /* Modify green table for giga */
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b54);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
 +      rtl_writephy(tp, 0x05, 0x8b5d);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
 +      rtl_writephy(tp, 0x05, 0x8a7c);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
 +      rtl_writephy(tp, 0x05, 0x8a7f);
 +      rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
 +      rtl_writephy(tp, 0x05, 0x8a82);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
 +      rtl_writephy(tp, 0x05, 0x8a85);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
 +      rtl_writephy(tp, 0x05, 0x8a88);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
        rtl_writephy(tp, 0x1f, 0x0000);
 -      rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
  
 -      /* Improve 10M EEE waveform */
 +      /* uc same-seed solution */
        rtl_writephy(tp, 0x1f, 0x0005);
 -      rtl_writephy(tp, 0x05, 0x8b86);
 -      rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
 +      rtl_writephy(tp, 0x05, 0x8b85);
 +      rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +
 +      /* eee setting */
 +      rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
 +      rtl_writephy(tp, 0x1f, 0x0005);
 +      rtl_writephy(tp, 0x05, 0x8b85);
 +      rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
 +      rtl_writephy(tp, 0x1f, 0x0004);
 +      rtl_writephy(tp, 0x1f, 0x0007);
 +      rtl_writephy(tp, 0x1e, 0x0020);
 +      rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_writephy(tp, 0x0d, 0x0007);
 +      rtl_writephy(tp, 0x0e, 0x003c);
 +      rtl_writephy(tp, 0x0d, 0x4007);
 +      rtl_writephy(tp, 0x0e, 0x0000);
 +      rtl_writephy(tp, 0x0d, 0x0000);
 +
 +      /* Green feature */
 +      rtl_writephy(tp, 0x1f, 0x0003);
 +      rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
 +      rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
        rtl_writephy(tp, 0x1f, 0x0000);
  }
  
@@@ -3250,25 -3151,6 +3254,25 @@@ static void rtl8105e_hw_phy_config(stru
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
  }
  
 +static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      /* Disable ALDPS before setting firmware */
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +      rtl_writephy(tp, 0x18, 0x0310);
 +      msleep(20);
 +
 +      rtl_apply_firmware(tp);
 +
 +      /* EEE setting */
 +      rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 +      rtl_writephy(tp, 0x1f, 0x0004);
 +      rtl_writephy(tp, 0x10, 0x401f);
 +      rtl_writephy(tp, 0x19, 0x7030);
 +      rtl_writephy(tp, 0x1f, 0x0000);
 +}
 +
  static void rtl_hw_phy_config(struct net_device *dev)
  {
        struct rtl8169_private *tp = netdev_priv(dev);
                rtl8168f_2_hw_phy_config(tp);
                break;
  
 +      case RTL_GIGA_MAC_VER_37:
 +              rtl8402_hw_phy_config(tp);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_38:
 +              rtl8411_hw_phy_config(tp);
 +              break;
 +
        default:
                break;
        }
@@@ -3602,8 -3476,6 +3606,8 @@@ static void rtl_wol_suspend_quirk(struc
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_34:
 +      case RTL_GIGA_MAC_VER_37:
 +      case RTL_GIGA_MAC_VER_38:
                RTL_W32(RxConfig, RTL_R32(RxConfig) |
                        AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
                break;
@@@ -3639,45 -3511,15 +3643,45 @@@ static void r810x_phy_power_up(struct r
  
  static void r810x_pll_power_down(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
        if (rtl_wol_pll_power_down(tp))
                return;
  
        r810x_phy_power_down(tp);
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_07:
 +      case RTL_GIGA_MAC_VER_08:
 +      case RTL_GIGA_MAC_VER_09:
 +      case RTL_GIGA_MAC_VER_10:
 +      case RTL_GIGA_MAC_VER_13:
 +      case RTL_GIGA_MAC_VER_16:
 +              break;
 +      default:
 +              RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
 +              break;
 +      }
  }
  
  static void r810x_pll_power_up(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
        r810x_phy_power_up(tp);
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_07:
 +      case RTL_GIGA_MAC_VER_08:
 +      case RTL_GIGA_MAC_VER_09:
 +      case RTL_GIGA_MAC_VER_10:
 +      case RTL_GIGA_MAC_VER_13:
 +      case RTL_GIGA_MAC_VER_16:
 +              break;
 +      default:
 +              RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
 +              break;
 +      }
  }
  
  static void r8168_phy_power_up(struct rtl8169_private *tp)
@@@ -3781,6 -3623,13 +3785,6 @@@ static void r8168_pll_power_up(struct r
  {
        void __iomem *ioaddr = tp->mmio_addr;
  
 -      if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 -           tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 -           tp->mac_version == RTL_GIGA_MAC_VER_31) &&
 -          r8168dp_check_dash(tp)) {
 -              return;
 -      }
 -
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
@@@ -3825,7 -3674,6 +3829,7 @@@ static void __devinit rtl_init_pll_powe
        case RTL_GIGA_MAC_VER_16:
        case RTL_GIGA_MAC_VER_29:
        case RTL_GIGA_MAC_VER_30:
 +      case RTL_GIGA_MAC_VER_37:
                ops->down       = r810x_pll_power_down;
                ops->up         = r810x_pll_power_up;
                break;
        case RTL_GIGA_MAC_VER_34:
        case RTL_GIGA_MAC_VER_35:
        case RTL_GIGA_MAC_VER_36:
 +      case RTL_GIGA_MAC_VER_38:
                ops->down       = r8168_pll_power_down;
                ops->up         = r8168_pll_power_up;
                break;
@@@ -4136,9 -3983,7 +4140,9 @@@ static void rtl8169_hw_reset(struct rtl
                        udelay(20);
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
 -                 tp->mac_version == RTL_GIGA_MAC_VER_36) {
 +                 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
 +                 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
 +                 tp->mac_version == RTL_GIGA_MAC_VER_38) {
                RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
                while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
                        udelay(100);
@@@ -4344,141 -4189,22 +4348,141 @@@ static void rtl_hw_start_8169(struct ne
        RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
  }
  
 -static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
 +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
 +{
 +      if (tp->csi_ops.write)
 +              tp->csi_ops.write(tp->mmio_addr, addr, value);
 +}
 +
 +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
 +{
 +      if (tp->csi_ops.read)
 +              return tp->csi_ops.read(tp->mmio_addr, addr);
 +      else
 +              return ~0;
 +}
 +
 +static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
  {
        u32 csi;
  
 -      csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
 -      rtl_csi_write(ioaddr, 0x070c, csi | bits);
 +      csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
 +      rtl_csi_write(tp, 0x070c, csi | bits);
 +}
 +
 +static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
 +{
 +      rtl_csi_access_enable(tp, 0x17000000);
 +}
 +
 +static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
 +{
 +      rtl_csi_access_enable(tp, 0x27000000);
 +}
 +
 +static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
 +{
 +      unsigned int i;
 +
 +      RTL_W32(CSIDR, value);
 +      RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
 +              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
 +                      break;
 +              udelay(10);
 +      }
 +}
 +
 +static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
 +{
 +      u32 value = ~0x00;
 +      unsigned int i;
 +
 +      RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
 +              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (RTL_R32(CSIAR) & CSIAR_FLAG) {
 +                      value = RTL_R32(CSIDR);
 +                      break;
 +              }
 +              udelay(10);
 +      }
 +
 +      return value;
 +}
 +
 +static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
 +{
 +      unsigned int i;
 +
 +      RTL_W32(CSIDR, value);
 +      RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
 +              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
 +              CSIAR_FUNC_NIC);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
 +                      break;
 +              udelay(10);
 +      }
  }
  
 -static void rtl_csi_access_enable_1(void __iomem *ioaddr)
 +static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
  {
 -      rtl_csi_access_enable(ioaddr, 0x17000000);
 +      u32 value = ~0x00;
 +      unsigned int i;
 +
 +      RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
 +              CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
 +
 +      for (i = 0; i < 100; i++) {
 +              if (RTL_R32(CSIAR) & CSIAR_FLAG) {
 +                      value = RTL_R32(CSIDR);
 +                      break;
 +              }
 +              udelay(10);
 +      }
 +
 +      return value;
  }
  
 -static void rtl_csi_access_enable_2(void __iomem *ioaddr)
 +static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
  {
 -      rtl_csi_access_enable(ioaddr, 0x27000000);
 +      struct csi_ops *ops = &tp->csi_ops;
 +
 +      switch (tp->mac_version) {
 +      case RTL_GIGA_MAC_VER_01:
 +      case RTL_GIGA_MAC_VER_02:
 +      case RTL_GIGA_MAC_VER_03:
 +      case RTL_GIGA_MAC_VER_04:
 +      case RTL_GIGA_MAC_VER_05:
 +      case RTL_GIGA_MAC_VER_06:
 +      case RTL_GIGA_MAC_VER_10:
 +      case RTL_GIGA_MAC_VER_11:
 +      case RTL_GIGA_MAC_VER_12:
 +      case RTL_GIGA_MAC_VER_13:
 +      case RTL_GIGA_MAC_VER_14:
 +      case RTL_GIGA_MAC_VER_15:
 +      case RTL_GIGA_MAC_VER_16:
 +      case RTL_GIGA_MAC_VER_17:
 +              ops->write      = NULL;
 +              ops->read       = NULL;
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_37:
 +      case RTL_GIGA_MAC_VER_38:
 +              ops->write      = r8402_csi_write;
 +              ops->read       = r8402_csi_read;
 +              break;
 +
 +      default:
 +              ops->write      = r8169_csi_write;
 +              ops->read       = r8169_csi_read;
 +              break;
 +      }
  }
  
  struct ephy_info {
@@@ -4535,11 -4261,8 +4539,11 @@@ static void rtl_enable_clock_request(st
        PktCntrDisable | \
        Mac_dbgo_sel)
  
 -static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
  
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
                (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
  }
  
 -static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
  {
 -      rtl_hw_start_8168bb(ioaddr, pdev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      rtl_hw_start_8168bb(tp);
  
        RTL_W8(MaxTxPacketSize, TxPacketMax);
  
        RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
  }
  
 -static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
        RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
  
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
  }
  
 -static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
        static const struct ephy_info e_info_8168cp[] = {
                { 0x01, 0,      0x0001 },
                { 0x02, 0x0800, 0x1000 },
                { 0x07, 0,      0x2000 }
        };
  
 -      rtl_csi_access_enable_2(ioaddr);
 +      rtl_csi_access_enable_2(tp);
  
        rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
  
 -      __rtl_hw_start_8168cp(ioaddr, pdev);
 +      __rtl_hw_start_8168cp(tp);
  }
  
 -static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
  {
 -      rtl_csi_access_enable_2(ioaddr);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      rtl_csi_access_enable_2(tp);
  
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
  
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
  }
  
 -static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
  {
 -      rtl_csi_access_enable_2(ioaddr);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      rtl_csi_access_enable_2(tp);
  
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
  
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
  }
  
 -static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
        static const struct ephy_info e_info_8168c_1[] = {
                { 0x02, 0x0800, 0x1000 },
                { 0x03, 0,      0x0002 },
                { 0x06, 0x0080, 0x0000 }
        };
  
 -      rtl_csi_access_enable_2(ioaddr);
 +      rtl_csi_access_enable_2(tp);
  
        RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
  
        rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
  
 -      __rtl_hw_start_8168cp(ioaddr, pdev);
 +      __rtl_hw_start_8168cp(tp);
  }
  
 -static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
        static const struct ephy_info e_info_8168c_2[] = {
                { 0x01, 0,      0x0001 },
                { 0x03, 0x0400, 0x0220 }
        };
  
 -      rtl_csi_access_enable_2(ioaddr);
 +      rtl_csi_access_enable_2(tp);
  
        rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
  
 -      __rtl_hw_start_8168cp(ioaddr, pdev);
 +      __rtl_hw_start_8168cp(tp);
  }
  
 -static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
  {
 -      rtl_hw_start_8168c_2(ioaddr, pdev);
 +      rtl_hw_start_8168c_2(tp);
  }
  
 -static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
  {
 -      rtl_csi_access_enable_2(ioaddr);
 +      rtl_csi_access_enable_2(tp);
  
 -      __rtl_hw_start_8168cp(ioaddr, pdev);
 +      __rtl_hw_start_8168cp(tp);
  }
  
 -static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168d(struct rtl8169_private *tp)
  {
 -      rtl_csi_access_enable_2(ioaddr);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      rtl_csi_access_enable_2(tp);
  
        rtl_disable_clock_request(pdev);
  
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
  }
  
 -static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
  {
 -      rtl_csi_access_enable_1(ioaddr);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      rtl_csi_access_enable_1(tp);
  
        rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
  
        rtl_disable_clock_request(pdev);
  }
  
 -static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
        static const struct ephy_info e_info_8168d_4[] = {
                { 0x0b, ~0,     0x48 },
                { 0x19, 0x20,   0x50 },
        };
        int i;
  
 -      rtl_csi_access_enable_1(ioaddr);
 +      rtl_csi_access_enable_1(tp);
  
        rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
  
        rtl_enable_clock_request(pdev);
  }
  
 -static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
        static const struct ephy_info e_info_8168e_1[] = {
                { 0x00, 0x0200, 0x0100 },
                { 0x00, 0x0000, 0x0004 },
                { 0x0a, 0x0000, 0x0040 }
        };
  
 -      rtl_csi_access_enable_2(ioaddr);
 +      rtl_csi_access_enable_2(tp);
  
        rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
  
        RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
  }
  
 -static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
        static const struct ephy_info e_info_8168e_2[] = {
                { 0x09, 0x0000, 0x0080 },
                { 0x19, 0x0000, 0x0224 }
        };
  
 -      rtl_csi_access_enable_1(ioaddr);
 +      rtl_csi_access_enable_1(tp);
  
        rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
  
        RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
  }
  
 -static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8168f(struct rtl8169_private *tp)
  {
 -      static const struct ephy_info e_info_8168f_1[] = {
 -              { 0x06, 0x00c0, 0x0020 },
 -              { 0x08, 0x0001, 0x0002 },
 -              { 0x09, 0x0000, 0x0080 },
 -              { 0x19, 0x0000, 0x0224 }
 -      };
 -
 -      rtl_csi_access_enable_1(ioaddr);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
  
 -      rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
 +      rtl_csi_access_enable_2(tp);
  
        rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
  
        rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
        rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
        rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
 -      rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
 -                   ERIAR_EXGMAC);
  
        RTL_W8(MaxTxPacketSize, EarlySize);
  
  
        RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
 +      RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 +      RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
 +      RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 +}
 +
 +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      static const struct ephy_info e_info_8168f_1[] = {
 +              { 0x06, 0x00c0, 0x0020 },
 +              { 0x08, 0x0001, 0x0002 },
 +              { 0x09, 0x0000, 0x0080 },
 +              { 0x19, 0x0000, 0x0224 }
 +      };
 +
 +      rtl_hw_start_8168f(tp);
 +
 +      rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
 +
 +      rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
 +                   ERIAR_EXGMAC);
  
        /* Adjust EEE LED frequency */
        RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
 +}
  
 -      RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 -      RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
 -      RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 +static void rtl_hw_start_8411(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      static const struct ephy_info e_info_8168f_1[] = {
 +              { 0x06, 0x00c0, 0x0020 },
 +              { 0x0f, 0xffff, 0x5200 },
 +              { 0x1e, 0x0000, 0x4000 },
 +              { 0x19, 0x0000, 0x0224 }
 +      };
 +
 +      rtl_hw_start_8168f(tp);
 +
 +      rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
 +
 +      rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
 +                   ERIAR_EXGMAC);
  }
  
  static void rtl_hw_start_8168(struct net_device *dev)
  {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
 -      struct pci_dev *pdev = tp->pci_dev;
  
        RTL_W8(Cfg9346, Cfg9346_Unlock);
  
  
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_11:
 -              rtl_hw_start_8168bb(ioaddr, pdev);
 +              rtl_hw_start_8168bb(tp);
                break;
  
        case RTL_GIGA_MAC_VER_12:
        case RTL_GIGA_MAC_VER_17:
 -              rtl_hw_start_8168bef(ioaddr, pdev);
 +              rtl_hw_start_8168bef(tp);
                break;
  
        case RTL_GIGA_MAC_VER_18:
 -              rtl_hw_start_8168cp_1(ioaddr, pdev);
 +              rtl_hw_start_8168cp_1(tp);
                break;
  
        case RTL_GIGA_MAC_VER_19:
 -              rtl_hw_start_8168c_1(ioaddr, pdev);
 +              rtl_hw_start_8168c_1(tp);
                break;
  
        case RTL_GIGA_MAC_VER_20:
 -              rtl_hw_start_8168c_2(ioaddr, pdev);
 +              rtl_hw_start_8168c_2(tp);
                break;
  
        case RTL_GIGA_MAC_VER_21:
 -              rtl_hw_start_8168c_3(ioaddr, pdev);
 +              rtl_hw_start_8168c_3(tp);
                break;
  
        case RTL_GIGA_MAC_VER_22:
 -              rtl_hw_start_8168c_4(ioaddr, pdev);
 +              rtl_hw_start_8168c_4(tp);
                break;
  
        case RTL_GIGA_MAC_VER_23:
 -              rtl_hw_start_8168cp_2(ioaddr, pdev);
 +              rtl_hw_start_8168cp_2(tp);
                break;
  
        case RTL_GIGA_MAC_VER_24:
 -              rtl_hw_start_8168cp_3(ioaddr, pdev);
 +              rtl_hw_start_8168cp_3(tp);
                break;
  
        case RTL_GIGA_MAC_VER_25:
        case RTL_GIGA_MAC_VER_26:
        case RTL_GIGA_MAC_VER_27:
 -              rtl_hw_start_8168d(ioaddr, pdev);
 +              rtl_hw_start_8168d(tp);
                break;
  
        case RTL_GIGA_MAC_VER_28:
 -              rtl_hw_start_8168d_4(ioaddr, pdev);
 +              rtl_hw_start_8168d_4(tp);
                break;
  
        case RTL_GIGA_MAC_VER_31:
 -              rtl_hw_start_8168dp(ioaddr, pdev);
 +              rtl_hw_start_8168dp(tp);
                break;
  
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
 -              rtl_hw_start_8168e_1(ioaddr, pdev);
 +              rtl_hw_start_8168e_1(tp);
                break;
        case RTL_GIGA_MAC_VER_34:
 -              rtl_hw_start_8168e_2(ioaddr, pdev);
 +              rtl_hw_start_8168e_2(tp);
                break;
  
        case RTL_GIGA_MAC_VER_35:
        case RTL_GIGA_MAC_VER_36:
 -              rtl_hw_start_8168f_1(ioaddr, pdev);
 +              rtl_hw_start_8168f_1(tp);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_38:
 +              rtl_hw_start_8411(tp);
                break;
  
        default:
        PktCntrDisable | \
        Mac_dbgo_sel)
  
 -static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
        static const struct ephy_info e_info_8102e_1[] = {
                { 0x01, 0, 0x6e65 },
                { 0x02, 0, 0x091f },
        };
        u8 cfg1;
  
 -      rtl_csi_access_enable_2(ioaddr);
 +      rtl_csi_access_enable_2(tp);
  
        RTL_W8(DBG_REG, FIX_NAK_1);
  
        rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
  }
  
 -static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
  {
 -      rtl_csi_access_enable_2(ioaddr);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      struct pci_dev *pdev = tp->pci_dev;
 +
 +      rtl_csi_access_enable_2(tp);
  
        rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
  
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
  }
  
 -static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
  {
 -      rtl_hw_start_8102e_2(ioaddr, pdev);
 +      rtl_hw_start_8102e_2(tp);
  
 -      rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
 +      rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
  }
  
 -static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
  {
 +      void __iomem *ioaddr = tp->mmio_addr;
        static const struct ephy_info e_info_8105e_1[] = {
                { 0x07, 0, 0x4000 },
                { 0x19, 0, 0x0200 },
        rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
  }
  
 -static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
 +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
  {
 -      rtl_hw_start_8105e_1(ioaddr, pdev);
 +      void __iomem *ioaddr = tp->mmio_addr;
 +
 +      rtl_hw_start_8105e_1(tp);
        rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
  }
  
 +static void rtl_hw_start_8402(struct rtl8169_private *tp)
 +{
 +      void __iomem *ioaddr = tp->mmio_addr;
 +      static const struct ephy_info e_info_8402[] = {
 +              { 0x19, 0xffff, 0xff64 },
 +              { 0x1e, 0, 0x4000 }
 +      };
 +
 +      rtl_csi_access_enable_2(tp);
 +
 +      /* Force LAN exit from ASPM if Rx/Tx are not idle */
 +      RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
 +
 +      RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 +      RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
 +
 +      rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
 +
 +      rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
 +
 +      rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
 +      rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
 +      rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 +      rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 +      rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
 +                   ERIAR_EXGMAC);
 +}
 +
  static void rtl_hw_start_8101(struct net_device *dev)
  {
        struct rtl8169_private *tp = netdev_priv(dev);
  
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_07:
 -              rtl_hw_start_8102e_1(ioaddr, pdev);
 +              rtl_hw_start_8102e_1(tp);
                break;
  
        case RTL_GIGA_MAC_VER_08:
 -              rtl_hw_start_8102e_3(ioaddr, pdev);
 +              rtl_hw_start_8102e_3(tp);
                break;
  
        case RTL_GIGA_MAC_VER_09:
 -              rtl_hw_start_8102e_2(ioaddr, pdev);
 +              rtl_hw_start_8102e_2(tp);
                break;
  
        case RTL_GIGA_MAC_VER_29:
 -              rtl_hw_start_8105e_1(ioaddr, pdev);
 +              rtl_hw_start_8105e_1(tp);
                break;
        case RTL_GIGA_MAC_VER_30:
 -              rtl_hw_start_8105e_2(ioaddr, pdev);
 +              rtl_hw_start_8105e_2(tp);
 +              break;
 +
 +      case RTL_GIGA_MAC_VER_37:
 +              rtl_hw_start_8402(tp);
                break;
        }
  
@@@ -5494,7 -5119,7 +5498,7 @@@ static netdev_tx_t rtl8169_start_xmit(s
        u32 opts[2];
        int frags;
  
-       if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+       if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
                netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
                goto err_stop_0;
        }
  
        mmiowb();
  
-       if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+       if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
                /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
                 * not miss a ring update when it notices a stopped queue.
                 */
                 * can't.
                 */
                smp_mb();
-               if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+               if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
                        netif_wake_queue(dev);
        }
  
@@@ -5685,7 -5310,7 +5689,7 @@@ static void rtl_tx(struct net_device *d
                 */
                smp_mb();
                if (netif_queue_stopped(dev) &&
-                   (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+                   TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
                        netif_wake_queue(dev);
                }
                /*
@@@ -6557,7 -6182,6 +6561,7 @@@ rtl_init_one(struct pci_dev *pdev, cons
        rtl_init_mdio_ops(tp);
        rtl_init_pll_power_ops(tp);
        rtl_init_jumbo_ops(tp);
 +      rtl_init_csi_ops(tp);
  
        rtl8169_print_mac_version(tp);
  
index 8253d2155fe3eb3f1968e53478be751294a1e265,4a0005342e65f2c117b3c14a5fbf23b14feda87e..b95f2e1b33f0c378c0f6a41fb9f815739b85b356
@@@ -656,30 -656,25 +656,30 @@@ static void efx_stop_datapath(struct ef
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
 +      struct pci_dev *dev = efx->pci_dev;
        int rc;
  
        EFX_ASSERT_RESET_SERIALISED(efx);
        BUG_ON(efx->port_enabled);
  
 -      rc = efx_nic_flush_queues(efx);
 -      if (rc && EFX_WORKAROUND_7803(efx)) {
 -              /* Schedule a reset to recover from the flush failure. The
 -               * descriptor caches reference memory we're about to free,
 -               * but falcon_reconfigure_mac_wrapper() won't reconnect
 -               * the MACs because of the pending reset. */
 -              netif_err(efx, drv, efx->net_dev,
 -                        "Resetting to recover from flush failure\n");
 -              efx_schedule_reset(efx, RESET_TYPE_ALL);
 -      } else if (rc) {
 -              netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
 -      } else {
 -              netif_dbg(efx, drv, efx->net_dev,
 -                        "successfully flushed all queues\n");
 +      /* Only perform flush if dma is enabled */
 +      if (dev->is_busmaster) {
 +              rc = efx_nic_flush_queues(efx);
 +
 +              if (rc && EFX_WORKAROUND_7803(efx)) {
 +                      /* Schedule a reset to recover from the flush failure. The
 +                       * descriptor caches reference memory we're about to free,
 +                       * but falcon_reconfigure_mac_wrapper() won't reconnect
 +                       * the MACs because of the pending reset. */
 +                      netif_err(efx, drv, efx->net_dev,
 +                                "Resetting to recover from flush failure\n");
 +                      efx_schedule_reset(efx, RESET_TYPE_ALL);
 +              } else if (rc) {
 +                      netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
 +              } else {
 +                      netif_dbg(efx, drv, efx->net_dev,
 +                                "successfully flushed all queues\n");
 +              }
        }
  
        efx_for_each_channel(channel, efx) {
@@@ -1354,7 -1349,7 +1354,7 @@@ static int efx_probe_interrupts(struct 
        }
  
        /* RSS might be usable on VFs even if it is disabled on the PF */
-       efx->rss_spread = (efx->n_rx_channels > 1 ?
+       efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
                           efx->n_rx_channels : efx_vf_size(efx));
  
        return 0;
@@@ -2497,8 -2492,8 +2497,8 @@@ static void efx_pci_remove(struct pci_d
        efx_fini_io(efx);
        netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
  
 -      pci_set_drvdata(pci_dev, NULL);
        efx_fini_struct(efx);
 +      pci_set_drvdata(pci_dev, NULL);
        free_netdev(efx->net_dev);
  };
  
@@@ -2700,7 -2695,6 +2700,7 @@@ static int __devinit efx_pci_probe(stru
   fail2:
        efx_fini_struct(efx);
   fail1:
 +      pci_set_drvdata(pci_dev, NULL);
        WARN_ON(rc > 0);
        netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
diff --combined drivers/net/macvlan.c
index ebacec1944ed8ada6dcea33a10ba463f5945c3d5,025367a94add314cc50193efbab5942ef1ffb1e6..66a9bfe7b1c87f40c8da94c34b3245e1ff3fcc0c
@@@ -57,7 -57,7 +57,7 @@@ static struct macvlan_dev *macvlan_hash
        struct hlist_node *n;
  
        hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
 -              if (!compare_ether_addr_64bits(vlan->dev->dev_addr, addr))
 +              if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
                        return vlan;
        }
        return NULL;
@@@ -96,7 -96,7 +96,7 @@@ static int macvlan_addr_busy(const stru
         * currently in use by the underlying device or
         * another macvlan.
         */
 -      if (!compare_ether_addr_64bits(port->dev->dev_addr, addr))
 +      if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
                return 1;
  
        if (macvlan_hash_lookup(port, addr))
@@@ -118,7 -118,8 +118,7 @@@ static int macvlan_broadcast_one(struc
                return vlan->forward(dev, skb);
  
        skb->dev = dev;
 -      if (!compare_ether_addr_64bits(eth->h_dest,
 -                                     dev->broadcast))
 +      if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;
@@@ -258,7 -259,7 +258,7 @@@ static int macvlan_queue_xmit(struct sk
  
  xmit_world:
        skb->ip_summed = ip_summed;
-       skb_set_dev(skb, vlan->lowerdev);
+       skb->dev = vlan->lowerdev;
        return dev_queue_xmit(skb);
  }
  
@@@ -311,8 -312,7 +311,8 @@@ static int macvlan_open(struct net_devi
        int err;
  
        if (vlan->port->passthru) {
 -              dev_set_promiscuity(lowerdev, 1);
 +              if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
 +                      dev_set_promiscuity(lowerdev, 1);
                goto hash_add;
        }
  
@@@ -344,15 -344,12 +344,15 @@@ static int macvlan_stop(struct net_devi
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
  
 +      dev_uc_unsync(lowerdev, dev);
 +      dev_mc_unsync(lowerdev, dev);
 +
        if (vlan->port->passthru) {
 -              dev_set_promiscuity(lowerdev, -1);
 +              if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
 +                      dev_set_promiscuity(lowerdev, -1);
                goto hash_del;
        }
  
 -      dev_mc_unsync(lowerdev, dev);
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(lowerdev, -1);
  
@@@ -402,11 -399,10 +402,11 @@@ static void macvlan_change_rx_flags(str
                dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
  }
  
 -static void macvlan_set_multicast_list(struct net_device *dev)
 +static void macvlan_set_mac_lists(struct net_device *dev)
  {
        struct macvlan_dev *vlan = netdev_priv(dev);
  
 +      dev_uc_sync(vlan->lowerdev, dev);
        dev_mc_sync(vlan->lowerdev, dev);
  }
  
@@@ -546,43 -542,6 +546,43 @@@ static int macvlan_vlan_rx_kill_vid(str
        return 0;
  }
  
 +static int macvlan_fdb_add(struct ndmsg *ndm,
 +                         struct net_device *dev,
 +                         unsigned char *addr,
 +                         u16 flags)
 +{
 +      struct macvlan_dev *vlan = netdev_priv(dev);
 +      int err = -EINVAL;
 +
 +      if (!vlan->port->passthru)
 +              return -EOPNOTSUPP;
 +
 +      if (is_unicast_ether_addr(addr))
 +              err = dev_uc_add_excl(dev, addr);
 +      else if (is_multicast_ether_addr(addr))
 +              err = dev_mc_add_excl(dev, addr);
 +
 +      return err;
 +}
 +
 +static int macvlan_fdb_del(struct ndmsg *ndm,
 +                         struct net_device *dev,
 +                         unsigned char *addr)
 +{
 +      struct macvlan_dev *vlan = netdev_priv(dev);
 +      int err = -EINVAL;
 +
 +      if (!vlan->port->passthru)
 +              return -EOPNOTSUPP;
 +
 +      if (is_unicast_ether_addr(addr))
 +              err = dev_uc_del(dev, addr);
 +      else if (is_multicast_ether_addr(addr))
 +              err = dev_mc_del(dev, addr);
 +
 +      return err;
 +}
 +
  static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
                                        struct ethtool_drvinfo *drvinfo)
  {
@@@ -613,14 -572,11 +613,14 @@@ static const struct net_device_ops macv
        .ndo_change_mtu         = macvlan_change_mtu,
        .ndo_change_rx_flags    = macvlan_change_rx_flags,
        .ndo_set_mac_address    = macvlan_set_mac_address,
 -      .ndo_set_rx_mode        = macvlan_set_multicast_list,
 +      .ndo_set_rx_mode        = macvlan_set_mac_lists,
        .ndo_get_stats64        = macvlan_dev_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = macvlan_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = macvlan_vlan_rx_kill_vid,
 +      .ndo_fdb_add            = macvlan_fdb_add,
 +      .ndo_fdb_del            = macvlan_fdb_del,
 +      .ndo_fdb_dump           = ndo_dflt_fdb_dump,
  };
  
  void macvlan_common_setup(struct net_device *dev)
@@@ -755,9 -711,6 +755,9 @@@ int macvlan_common_newlink(struct net *
        if (data && data[IFLA_MACVLAN_MODE])
                vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
  
 +      if (data && data[IFLA_MACVLAN_FLAGS])
 +              vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 +
        if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
                if (port->count)
                        return -EINVAL;
@@@ -807,16 -760,6 +807,16 @@@ static int macvlan_changelink(struct ne
        struct macvlan_dev *vlan = netdev_priv(dev);
        if (data && data[IFLA_MACVLAN_MODE])
                vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
 +      if (data && data[IFLA_MACVLAN_FLAGS]) {
 +              __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 +              bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
 +
 +              if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
 +                      dev_set_promiscuity(vlan->lowerdev, -1);
 +              else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
 +                      dev_set_promiscuity(vlan->lowerdev, 1);
 +              vlan->flags = flags;
 +      }
        return 0;
  }
  
@@@ -830,10 -773,7 +830,10 @@@ static int macvlan_fill_info(struct sk_
  {
        struct macvlan_dev *vlan = netdev_priv(dev);
  
 -      NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode);
 +      if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
 +              goto nla_put_failure;
 +      if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
  }
  
  static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
 -      [IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
 +      [IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
 +      [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
  };
  
  int macvlan_link_register(struct rtnl_link_ops *ops)
diff --combined drivers/net/macvtap.c
index 163559c16988329b57878b376d320a8ed8db5bfd,cb8fd5069dbe3971a001ea459a085b4d64657fa8..2ee56de7b0cabc4d3611a93ee26a7218e2555958
@@@ -1,5 -1,6 +1,6 @@@
  #include <linux/etherdevice.h>
  #include <linux/if_macvlan.h>
+ #include <linux/if_vlan.h>
  #include <linux/interrupt.h>
  #include <linux/nsproxy.h>
  #include <linux/compat.h>
@@@ -505,11 -506,10 +506,11 @@@ static int zerocopy_sg_from_iovec(struc
                if (copy > size) {
                        ++from;
                        --count;
 -              }
 +                      offset = 0;
 +              } else
 +                      offset += size;
                copy -= size;
                offset1 += size;
 -              offset = 0;
        }
  
        if (len == offset1)
                struct page *page[MAX_SKB_FRAGS];
                int num_pages;
                unsigned long base;
 +              unsigned long truesize;
  
 -              len = from->iov_len - offset1;
 +              len = from->iov_len - offset;
                if (!len) {
 -                      offset1 = 0;
 +                      offset = 0;
                        ++from;
                        continue;
                }
 -              base = (unsigned long)from->iov_base + offset1;
 +              base = (unsigned long)from->iov_base + offset;
                size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
 +              if (i + size > MAX_SKB_FRAGS)
 +                      return -EMSGSIZE;
                num_pages = get_user_pages_fast(base, size, 0, &page[i]);
 -              if ((num_pages != size) ||
 -                  (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
 -                      /* put_page is in skb free */
 +              if (num_pages != size) {
 +                      for (i = 0; i < num_pages; i++)
 +                              put_page(page[i]);
                        return -EFAULT;
 +              }
 +              truesize = size * PAGE_SIZE;
                skb->data_len += len;
                skb->len += len;
 -              skb->truesize += len;
 -              atomic_add(len, &skb->sk->sk_wmem_alloc);
 +              skb->truesize += truesize;
 +              atomic_add(truesize, &skb->sk->sk_wmem_alloc);
                while (len) {
                        int off = base & ~PAGE_MASK;
                        int size = min_t(int, len, PAGE_SIZE - off);
                        len -= size;
                        i++;
                }
 -              offset1 = 0;
 +              offset = 0;
                ++from;
        }
        return 0;
@@@ -652,7 -647,7 +653,7 @@@ static ssize_t macvtap_get_user(struct 
        int err;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;
 -      int copylen;
 +      int copylen = 0;
        bool zerocopy = false;
  
        if (q->flags & IFF_VNET_HDR) {
        if (unlikely(len < ETH_HLEN))
                goto err;
  
 +      err = -EMSGSIZE;
 +      if (unlikely(count > UIO_MAXIOV))
 +              goto err;
 +
        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
                zerocopy = true;
  
        if (zerocopy) {
 +              /* Userspace may produce vectors with count greater than
 +               * MAX_SKB_FRAGS, so we need to linearize parts of the skb
 +               * to let the rest of the data fit in the frags.
 +               */
 +              if (count > MAX_SKB_FRAGS) {
 +                      copylen = iov_length(iv, count - MAX_SKB_FRAGS);
 +                      if (copylen < vnet_hdr_len)
 +                              copylen = 0;
 +                      else
 +                              copylen -= vnet_hdr_len;
 +              }
                /* There are 256 bytes to be copied in skb, so there is enough
                 * room for skb expand head in case it is used.
                 * The rest buffer is mapped from userspace.
                 */
 -              copylen = vnet_hdr.hdr_len;
 +              if (copylen < vnet_hdr.hdr_len)
 +                      copylen = vnet_hdr.hdr_len;
                if (!copylen)
                        copylen = GOODCOPY_LEN;
        } else
        if (!skb)
                goto err;
  
 -      if (zerocopy) {
 +      if (zerocopy)
                err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
 -              skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 -      } else
 +      else
                err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
                                                   len);
        if (err)
        rcu_read_lock_bh();
        vlan = rcu_dereference_bh(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
 -      if (zerocopy)
 +      if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
 +              skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 +      }
        if (vlan)
                macvlan_start_xmit(skb, vlan->dev);
        else
@@@ -782,6 -760,8 +783,8 @@@ static ssize_t macvtap_put_user(struct 
        struct macvlan_dev *vlan;
        int ret;
        int vnet_hdr_len = 0;
+       int vlan_offset = 0;
+       int copied;
  
        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
                if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
                        return -EFAULT;
        }
+       copied = vnet_hdr_len;
+       if (!vlan_tx_tag_present(skb))
+               len = min_t(int, skb->len, len);
+       else {
+               int copy;
+               struct {
+                       __be16 h_vlan_proto;
+                       __be16 h_vlan_TCI;
+               } veth;
+               veth.h_vlan_proto = htons(ETH_P_8021Q);
+               veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+               vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+               len = min_t(int, skb->len + VLAN_HLEN, len);
+               copy = min_t(int, vlan_offset, len);
+               ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+               len -= copy;
+               copied += copy;
+               if (ret || !len)
+                       goto done;
+               copy = min_t(int, sizeof(veth), len);
+               ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+               len -= copy;
+               copied += copy;
+               if (ret || !len)
+                       goto done;
+       }
  
-       len = min_t(int, skb->len, len);
-       ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+       ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+       copied += len;
  
+ done:
        rcu_read_lock_bh();
        vlan = rcu_dereference_bh(q->vlan);
        if (vlan)
-               macvlan_count_rx(vlan, len, ret == 0, 0);
+               macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
        rcu_read_unlock_bh();
  
-       return ret ? ret : (len + vnet_hdr_len);
+       return ret ? ret : copied;
  }
  
  static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
diff --combined drivers/net/usb/usbnet.c
index 80b837c88f0d4885fce29a9636210ff5fc45bbf7,b38db48b1ce09b380d8cdea0ea444be0d97573d0..9f58330f1312059d37deeb072b2d5c32d3d6df03
@@@ -282,17 -282,32 +282,32 @@@ int usbnet_change_mtu (struct net_devic
  }
  EXPORT_SYMBOL_GPL(usbnet_change_mtu);
  
+ /* The caller must hold list->lock */
+ static void __usbnet_queue_skb(struct sk_buff_head *list,
+                       struct sk_buff *newsk, enum skb_state state)
+ {
+       struct skb_data *entry = (struct skb_data *) newsk->cb;
+       __skb_queue_tail(list, newsk);
+       entry->state = state;
+ }
  /*-------------------------------------------------------------------------*/
  
  /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
   * completion callbacks.  2.5 should have fixed those bugs...
   */
  
- static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+               struct sk_buff_head *list, enum skb_state state)
  {
        unsigned long           flags;
+       enum skb_state          old_state;
+       struct skb_data *entry = (struct skb_data *) skb->cb;
  
        spin_lock_irqsave(&list->lock, flags);
+       old_state = entry->state;
+       entry->state = state;
        __skb_unlink(skb, list);
        spin_unlock(&list->lock);
        spin_lock(&dev->done.lock);
        if (dev->done.qlen == 1)
                tasklet_schedule(&dev->bh);
        spin_unlock_irqrestore(&dev->done.lock, flags);
+       return old_state;
  }
  
  /* some work can't be done in tasklets, so we use keventd
@@@ -340,7 -356,6 +356,6 @@@ static int rx_submit (struct usbnet *de
        entry = (struct skb_data *) skb->cb;
        entry->urb = urb;
        entry->dev = dev;
-       entry->state = rx_start;
        entry->length = 0;
  
        usb_fill_bulk_urb (urb, dev->udev, dev->in,
                        tasklet_schedule (&dev->bh);
                        break;
                case 0:
-                       __skb_queue_tail (&dev->rxq, skb);
+                       __usbnet_queue_skb(&dev->rxq, skb, rx_start);
                }
        } else {
                netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@@ -423,16 -438,17 +438,17 @@@ static void rx_complete (struct urb *ur
        struct skb_data         *entry = (struct skb_data *) skb->cb;
        struct usbnet           *dev = entry->dev;
        int                     urb_status = urb->status;
+       enum skb_state          state;
  
        skb_put (skb, urb->actual_length);
-       entry->state = rx_done;
+       state = rx_done;
        entry->urb = NULL;
  
        switch (urb_status) {
        /* success */
        case 0:
                if (skb->len < dev->net->hard_header_len) {
-                       entry->state = rx_cleanup;
+                       state = rx_cleanup;
                        dev->net->stats.rx_errors++;
                        dev->net->stats.rx_length_errors++;
                        netif_dbg(dev, rx_err, dev->net,
                                  "rx throttle %d\n", urb_status);
                }
  block:
-               entry->state = rx_cleanup;
+               state = rx_cleanup;
                entry->urb = urb;
                urb = NULL;
                break;
                // FALLTHROUGH
  
        default:
-               entry->state = rx_cleanup;
+               state = rx_cleanup;
                dev->net->stats.rx_errors++;
                netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
                break;
        }
  
-       defer_bh(dev, skb, &dev->rxq);
+       state = defer_bh(dev, skb, &dev->rxq, state);
  
        if (urb) {
                if (netif_running (dev->net) &&
-                   !test_bit (EVENT_RX_HALT, &dev->flags)) {
+                   !test_bit (EVENT_RX_HALT, &dev->flags) &&
+                   state != unlink_start) {
                        rx_submit (dev, urb, GFP_ATOMIC);
                        usb_mark_last_busy(dev->udev);
                        return;
@@@ -579,16 -596,23 +596,23 @@@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_r
  static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
  {
        unsigned long           flags;
-       struct sk_buff          *skb, *skbnext;
+       struct sk_buff          *skb;
        int                     count = 0;
  
        spin_lock_irqsave (&q->lock, flags);
-       skb_queue_walk_safe(q, skb, skbnext) {
+       while (!skb_queue_empty(q)) {
                struct skb_data         *entry;
                struct urb              *urb;
                int                     retval;
  
-               entry = (struct skb_data *) skb->cb;
+               skb_queue_walk(q, skb) {
+                       entry = (struct skb_data *) skb->cb;
+                       if (entry->state != unlink_start)
+                               goto found;
+               }
+               break;
+ found:
+               entry->state = unlink_start;
                urb = entry->urb;
  
                /*
@@@ -885,7 -909,6 +909,7 @@@ static const struct ethtool_ops usbnet_
        .get_drvinfo            = usbnet_get_drvinfo,
        .get_msglevel           = usbnet_get_msglevel,
        .set_msglevel           = usbnet_set_msglevel,
 +      .get_ts_info            = ethtool_op_get_ts_info,
  };
  
  /*-------------------------------------------------------------------------*/
@@@ -1040,8 -1063,7 +1064,7 @@@ static void tx_complete (struct urb *ur
        }
  
        usb_autopm_put_interface_async(dev->intf);
-       entry->state = tx_done;
-       defer_bh(dev, skb, &dev->txq);
+       (void) defer_bh(dev, skb, &dev->txq, tx_done);
  }
  
  /*-------------------------------------------------------------------------*/
@@@ -1097,7 -1119,6 +1120,6 @@@ netdev_tx_t usbnet_start_xmit (struct s
        entry = (struct skb_data *) skb->cb;
        entry->urb = urb;
        entry->dev = dev;
-       entry->state = tx_start;
        entry->length = length;
  
        usb_fill_bulk_urb (urb, dev->udev, dev->out,
                break;
        case 0:
                net->trans_start = jiffies;
-               __skb_queue_tail (&dev->txq, skb);
+               __usbnet_queue_skb(&dev->txq, skb, tx_start);
                if (dev->txq.qlen >= TX_QLEN (dev))
                        netif_stop_queue (net);
        }
index f7868c0d79ed491085b7eeb0f25b7d8a1fbc4ef4,67f9430ee197743fa7d2f22a02f706c46cafdd98..2062ea1d7c807118229a08fe182993d888194d44
@@@ -34,7 -34,6 +34,7 @@@
  #include "ps.h"
  #include "efuse.h"
  #include <linux/export.h>
 +#include <linux/kmemleak.h>
  
  static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
        PCI_VENDOR_ID_INTEL,
@@@ -1100,7 -1099,6 +1100,7 @@@ static int _rtl_pci_init_rx_ring(struc
                        u32 bufferaddress;
                        if (!skb)
                                return 0;
 +                      kmemleak_not_leak(skb);
                        entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
  
                        /*skb->dev = dev; */
@@@ -1853,14 -1851,6 +1853,6 @@@ int __devinit rtl_pci_probe(struct pci_
        /*like read eeprom and so on */
        rtlpriv->cfg->ops->read_eeprom_info(hw);
  
-       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
-               err = -ENODEV;
-               goto fail3;
-       }
-       rtlpriv->cfg->ops->init_sw_leds(hw);
        /*aspm */
        rtl_pci_init_aspm(hw);
  
                goto fail3;
        }
  
+       if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+               err = -ENODEV;
+               goto fail3;
+       }
+       rtlpriv->cfg->ops->init_sw_leds(hw);
        err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --combined drivers/ptp/ptp_pch.c
index 08c331130d88f3b19beba9281aab5ab1e1f882cb,6fff680204885a3653464bc5369495bef2a97fd7..3a9c17eced10c3a34d591fd465db555ec9498b88
@@@ -30,6 -30,7 +30,7 @@@
  #include <linux/module.h>
  #include <linux/pci.h>
  #include <linux/ptp_clock_kernel.h>
+ #include <linux/slab.h>
  
  #define STATION_ADDR_LEN      20
  #define PCI_DEVICE_ID_PCH_1588        0x8819
@@@ -261,7 -262,6 +262,7 @@@ u64 pch_rx_snap_read(struct pci_dev *pd
  
        ns = ((u64) hi) << 32;
        ns |= lo;
 +      ns <<= TICKS_NS_SHIFT;
  
        return ns;
  }
@@@ -278,7 -278,6 +279,7 @@@ u64 pch_tx_snap_read(struct pci_dev *pd
  
        ns = ((u64) hi) << 32;
        ns |= lo;
 +      ns <<= TICKS_NS_SHIFT;
  
        return ns;
  }
@@@ -308,7 -307,7 +309,7 @@@ static void pch_reset(struct pch_dev *c
  *                                traffic on the Ethernet interface
  * @addr:     address which contains the colon separated address to be used.
   */
 -static int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
 +int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
  {
        s32 i;
        struct pch_dev *chip = pci_get_drvdata(pdev);
        }
        return 0;
  }
 +EXPORT_SYMBOL(pch_set_station_address);
  
  /*
   * Interrupt service routine
@@@ -652,6 -650,8 +653,6 @@@ pch_probe(struct pci_dev *pdev, const s
        iowrite32(1, &chip->regs->trgt_lo);
        iowrite32(0, &chip->regs->trgt_hi);
        iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
 -      /* Version: IEEE1588 v1 and IEEE1588-2008,  Mode: All Evwnt, Locked  */
 -      iowrite32(0x80020000, &chip->regs->ch_control);
  
        pch_eth_enable_set(chip);
  
diff --combined drivers/vhost/net.c
index 853db7a08a26bb27e7d7d219262e5974cc312f39,5c170100de9ca766854c37f8d85956251653baba..f82a7394756ebc74714a1985c7a48a3fcf4796b8
@@@ -24,6 -24,7 +24,7 @@@
  #include <linux/if_arp.h>
  #include <linux/if_tun.h>
  #include <linux/if_macvlan.h>
+ #include <linux/if_vlan.h>
  
  #include <net/sock.h>
  
@@@ -166,7 -167,7 +167,7 @@@ static void handle_tx(struct vhost_net 
        if (wmem < sock->sk->sk_sndbuf / 2)
                tx_poll_stop(net);
        hdr_size = vq->vhost_hlen;
 -      zcopy = vhost_sock_zcopy(sock);
 +      zcopy = vq->ubufs;
  
        for (;;) {
                /* Release DMAs done buffers first */
                                        UIO_MAXIOV;
                        }
                        vhost_discard_vq_desc(vq, 1);
 -                      tx_poll_start(net, sock);
 +                      if (err == -EAGAIN || err == -ENOBUFS)
 +                              tx_poll_start(net, sock);
                        break;
                }
                if (err != len)
                                 " len %d != %zd\n", err, len);
                if (!zcopy)
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
 +              else
 +                      vhost_zerocopy_signal_used(vq);
                total_len += len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
@@@ -286,8 -284,12 +287,12 @@@ static int peek_head_len(struct sock *s
  
        spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
        head = skb_peek(&sk->sk_receive_queue);
-       if (likely(head))
+       if (likely(head)) {
                len = head->len;
+               if (vlan_tx_tag_present(head))
+                       len += VLAN_HLEN;
+       }
        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
        return len;
  }
index 7f377fb8b527a3754cfdb92e7afa95f69bde2f7c,33900a53c990e2b8e8cbcce46220625b527083a6..b0f6f22723c320695e287d2b0f167deae447bb16
@@@ -54,7 -54,6 +54,7 @@@
  #include <net/netprio_cgroup.h>
  
  #include <linux/netdev_features.h>
 +#include <linux/neighbour.h>
  
  struct netpoll_info;
  struct device;
@@@ -289,7 -288,7 +289,7 @@@ struct hh_cache 
  struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
                           unsigned short type, const void *daddr,
 -                         const void *saddr, unsigned len);
 +                         const void *saddr, unsigned int len);
        int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int     (*rebuild)(struct sk_buff *skb);
        int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
@@@ -906,16 -905,6 +906,16 @@@ struct netdev_fcoe_hbainfo 
   *    feature set might be less than what was returned by ndo_fix_features()).
   *    Must return >0 or -errno if it changed dev->features itself.
   *
 + * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev,
 + *                  unsigned char *addr, u16 flags)
 + *    Adds an FDB entry to dev for addr.
 + * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
 + *                  unsigned char *addr)
  + *    Deletes the FDB entry from dev corresponding to addr.
 + * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 + *                   struct net_device *dev, int idx)
 + *    Used to add FDB entries to dump requests. Implementers should add
 + *    entries to skb and update idx with the number of entries.
   */
  struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
                                                    netdev_features_t features);
        int                     (*ndo_neigh_construct)(struct neighbour *n);
        void                    (*ndo_neigh_destroy)(struct neighbour *n);
 +
 +      int                     (*ndo_fdb_add)(struct ndmsg *ndm,
 +                                             struct net_device *dev,
 +                                             unsigned char *addr,
 +                                             u16 flags);
 +      int                     (*ndo_fdb_del)(struct ndmsg *ndm,
 +                                             struct net_device *dev,
 +                                             unsigned char *addr);
 +      int                     (*ndo_fdb_dump)(struct sk_buff *skb,
 +                                              struct netlink_callback *cb,
 +                                              struct net_device *dev,
 +                                              int idx);
  };
  
  /*
@@@ -1426,15 -1403,6 +1426,6 @@@ static inline bool netdev_uses_dsa_tags
        return 0;
  }
  
- #ifndef CONFIG_NET_NS
- static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
- {
-       skb->dev = dev;
- }
- #else /* CONFIG_NET_NS */
- void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
- #endif
  static inline bool netdev_uses_trailer_tags(struct net_device *dev)
  {
  #ifdef CONFIG_NET_DSA_TAG_TRAILER
@@@ -1509,8 -1477,6 +1500,8 @@@ struct napi_gro_cb 
  
        /* Free the skb? */
        int free;
 +#define NAPI_GRO_FREE           1
 +#define NAPI_GRO_FREE_STOLEN_HEAD 2
  };
  
  #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
@@@ -1714,7 -1680,7 +1705,7 @@@ static inline void *skb_gro_network_hea
  static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
 -                                unsigned len)
 +                                unsigned int len)
  {
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;
@@@ -1765,7 -1731,7 +1756,7 @@@ struct softnet_data 
        unsigned int            input_queue_head;
        unsigned int            input_queue_tail;
  #endif
 -      unsigned                dropped;
 +      unsigned int            dropped;
        struct sk_buff_head     input_pkt_queue;
        struct napi_struct      backlog;
  };
@@@ -1950,7 -1916,7 +1941,7 @@@ static inline void netdev_sent_queue(st
  }
  
  static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 -                                           unsigned pkts, unsigned bytes)
 +                                           unsigned int pkts, unsigned int bytes)
  {
  #ifdef CONFIG_BQL
        if (unlikely(!bytes))
  }
  
  static inline void netdev_completed_queue(struct net_device *dev,
 -                                        unsigned pkts, unsigned bytes)
 +                                        unsigned int pkts, unsigned int bytes)
  {
        netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
  }
@@@ -2169,9 -2135,9 +2160,9 @@@ extern void netdev_rx_handler_unregiste
  extern bool           dev_valid_name(const char *name);
  extern int            dev_ioctl(struct net *net, unsigned int cmd, void __user *);
  extern int            dev_ethtool(struct net *net, struct ifreq *);
 -extern unsigned               dev_get_flags(const struct net_device *);
 +extern unsigned int   dev_get_flags(const struct net_device *);
  extern int            __dev_change_flags(struct net_device *, unsigned int flags);
 -extern int            dev_change_flags(struct net_device *, unsigned);
 +extern int            dev_change_flags(struct net_device *, unsigned int);
  extern void           __dev_notify_flags(struct net_device *, unsigned int old_flags);
  extern int            dev_change_name(struct net_device *, const char *);
  extern int            dev_set_alias(struct net_device *, const char *, size_t);
@@@ -2571,7 -2537,6 +2562,7 @@@ extern int dev_addr_init(struct net_dev
  
  /* Functions used for unicast addresses handling */
  extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
 +extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
  extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
  extern int dev_uc_sync(struct net_device *to, struct net_device *from);
  extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
@@@ -2581,7 -2546,6 +2572,7 @@@ extern void dev_uc_init(struct net_devi
  /* Functions used for multicast addresses handling */
  extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
  extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
 +extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
  extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
  extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
  extern int dev_mc_sync(struct net_device *to, struct net_device *from);
index 289b62d9dd1fd272ad03737a1f910f1a916a050e,230a290e1973c63f4411154e581692669e37ee08..b114d35aea5e652c90b13ef94e1ef00f6d864c90
@@@ -99,6 -99,22 +99,22 @@@ struct ip_set_hash 
  #endif
  };
  
+ static size_t
+ htable_size(u8 hbits)
+ {
+       size_t hsize;
+       /* We must fit both into u32 in jhash and size_t */
+       if (hbits > 31)
+               return 0;
+       hsize = jhash_size(hbits);
+       if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket)
+           < hsize)
+               return 0;
+       return hsize * sizeof(struct hbucket) + sizeof(struct htable);
+ }
  /* Compute htable_bits from the user input parameter hashsize */
  static u8
  htable_bits(u32 hashsize)
@@@ -594,20 -610,17 +610,20 @@@ type_pf_head(struct ip_set *set, struc
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
 -      NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
 -                    htonl(jhash_size(h->table->htable_bits)));
 -      NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
 +      if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
 +                        htonl(jhash_size(h->table->htable_bits))) ||
 +          nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
 +              goto nla_put_failure;
  #ifdef IP_SET_HASH_WITH_NETMASK
 -      if (h->netmask != HOST_MASK)
 -              NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
 +      if (h->netmask != HOST_MASK &&
 +          nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
 +              goto nla_put_failure;
  #endif
 -      NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 -      NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
 -      if (with_timeout(h->timeout))
 -              NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
 +      if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
 +          nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
 +          (with_timeout(h->timeout) &&
 +           nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))))
 +              goto nla_put_failure;
        ipset_nest_end(skb, nested);
  
        return 0;
diff --combined net/8021q/vlan_dev.c
index eaf5f21b0ef7473853bb5fec3be25f7f71c205f2,9757c193c86bc66a02fc09500702ec99d45e9e77..da1bc9c3cf38bfa36747a22e94898b047a0fdd59
@@@ -157,7 -157,7 +157,7 @@@ static netdev_tx_t vlan_dev_hard_start_
                skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
        }
  
-       skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
+       skb->dev = vlan_dev_priv(dev)->real_dev;
        len = skb->len;
        if (netpoll_tx_running(dev))
                return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
@@@ -277,7 -277,7 +277,7 @@@ static int vlan_dev_open(struct net_dev
            !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                return -ENETDOWN;
  
 -      if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
 +      if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
                err = dev_uc_add(real_dev, dev->dev_addr);
                if (err < 0)
                        goto out;
@@@ -307,7 -307,7 +307,7 @@@ clear_allmulti
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(real_dev, -1);
  del_unicast:
 -      if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 +      if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
                dev_uc_del(real_dev, dev->dev_addr);
  out:
        netif_carrier_off(dev);
@@@ -326,7 -326,7 +326,7 @@@ static int vlan_dev_stop(struct net_dev
        if (dev->flags & IFF_PROMISC)
                dev_set_promiscuity(real_dev, -1);
  
 -      if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 +      if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
                dev_uc_del(real_dev, dev->dev_addr);
  
        netif_carrier_off(dev);
@@@ -345,13 -345,13 +345,13 @@@ static int vlan_dev_set_mac_address(str
        if (!(dev->flags & IFF_UP))
                goto out;
  
 -      if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
 +      if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
                err = dev_uc_add(real_dev, addr->sa_data);
                if (err < 0)
                        return err;
        }
  
 -      if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 +      if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
                dev_uc_del(real_dev, dev->dev_addr);
  
  out:
diff --combined net/core/dev.c
index 66cae6e975d98af4fc512be45825e8c0260dc9bd,99e1d759f41ec42c6efc2dc3debbfc7dead6e88b..33684b6e95e2996db2165a600fb175093b1b32e0
@@@ -208,8 -208,7 +208,8 @@@ static inline void dev_base_seq_inc(str
  
  static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
  {
 -      unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
 +      unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
 +
        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
  }
  
@@@ -300,9 -299,10 +300,9 @@@ static const unsigned short netdev_lock
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
 -       ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
 -       ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
 -       ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
 -       ARPHRD_VOID, ARPHRD_NONE};
 +       ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
 +       ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
 +       ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
  
  static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
 -       "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
 -       "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
 -       "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
 -       "_xmit_VOID", "_xmit_NONE"};
 +       "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
 +       "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
 +       "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
  
  static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
  static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@@ -1616,10 -1617,14 +1616,14 @@@ int dev_forward_skb(struct net_device *
                return NET_RX_DROP;
        }
        skb->skb_iif = 0;
-       skb_set_dev(skb, dev);
+       skb->dev = dev;
+       skb_dst_drop(skb);
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, dev);
+       skb->mark = 0;
+       secpath_reset(skb);
+       nf_reset(skb);
        return netif_rx(skb);
  }
  EXPORT_SYMBOL_GPL(dev_forward_skb);
@@@ -1671,9 -1676,10 +1675,9 @@@ static void dev_queue_xmit_nit(struct s
  
                        if (skb_network_header(skb2) < skb2->data ||
                            skb2->network_header > skb2->tail) {
 -                              if (net_ratelimit())
 -                                      pr_crit("protocol %04x is buggy, dev %s\n",
 -                                              ntohs(skb2->protocol),
 -                                              dev->name);
 +                              net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
 +                                                   ntohs(skb2->protocol),
 +                                                   dev->name);
                                skb_reset_network_header(skb2);
                        }
  
@@@ -1867,36 -1873,6 +1871,6 @@@ void netif_device_attach(struct net_dev
  }
  EXPORT_SYMBOL(netif_device_attach);
  
- /**
-  * skb_dev_set -- assign a new device to a buffer
-  * @skb: buffer for the new device
-  * @dev: network device
-  *
-  * If an skb is owned by a device already, we have to reset
-  * all data private to the namespace a device belongs to
-  * before assigning it a new device.
-  */
- #ifdef CONFIG_NET_NS
- void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
- {
-       skb_dst_drop(skb);
-       if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
-               secpath_reset(skb);
-               nf_reset(skb);
-               skb_init_secmark(skb);
-               skb->mark = 0;
-               skb->priority = 0;
-               skb->nf_trace = 0;
-               skb->ipvs_property = 0;
- #ifdef CONFIG_NET_SCHED
-               skb->tc_index = 0;
- #endif
-       }
-       skb->dev = dev;
- }
- EXPORT_SYMBOL(skb_set_dev);
- #endif /* CONFIG_NET_NS */
  static void skb_warn_bad_offload(const struct sk_buff *skb)
  {
        static const netdev_features_t null_features = 0;
@@@ -2340,9 -2316,11 +2314,9 @@@ EXPORT_SYMBOL(__skb_tx_hash)
  static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
  {
        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 -              if (net_ratelimit()) {
 -                      pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
 -                              dev->name, queue_index,
 -                              dev->real_num_tx_queues);
 -              }
 +              net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
 +                                   dev->name, queue_index,
 +                                   dev->real_num_tx_queues);
                return 0;
        }
        return queue_index;
@@@ -2584,15 -2562,17 +2558,15 @@@ int dev_queue_xmit(struct sk_buff *skb
                                }
                        }
                        HARD_TX_UNLOCK(dev, txq);
 -                      if (net_ratelimit())
 -                              pr_crit("Virtual device %s asks to queue packet!\n",
 -                                      dev->name);
 +                      net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
 +                                           dev->name);
                } else {
                        /* Recursion is detected! It is possible,
                         * unfortunately
                         */
  recursion_alert:
 -                      if (net_ratelimit())
 -                              pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
 -                                      dev->name);
 +                      net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
 +                                           dev->name);
                }
        }
  
@@@ -3073,8 -3053,9 +3047,8 @@@ static int ing_filter(struct sk_buff *s
        struct Qdisc *q;
  
        if (unlikely(MAX_RED_LOOP < ttl++)) {
 -              if (net_ratelimit())
 -                      pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
 -                              skb->skb_iif, dev->ifindex);
 +              net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
 +                                   skb->skb_iif, dev->ifindex);
                return TC_ACT_SHOT;
        }
  
@@@ -3534,16 -3515,10 +3508,16 @@@ gro_result_t napi_skb_finish(gro_result
                break;
  
        case GRO_DROP:
 -      case GRO_MERGED_FREE:
                kfree_skb(skb);
                break;
  
 +      case GRO_MERGED_FREE:
 +              if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
 +                      kmem_cache_free(skbuff_head_cache, skb);
 +              else
 +                      __kfree_skb(skb);
 +              break;
 +
        case GRO_HELD:
        case GRO_MERGED:
                break;
@@@ -4617,9 -4592,9 +4591,9 @@@ void dev_set_rx_mode(struct net_device 
   *
   *    Get the combination of flag bits exported through APIs to userspace.
   */
 -unsigned dev_get_flags(const struct net_device *dev)
 +unsigned int dev_get_flags(const struct net_device *dev)
  {
 -      unsigned flags;
 +      unsigned int flags;
  
        flags = (dev->flags & ~(IFF_PROMISC |
                                IFF_ALLMULTI |
diff --combined net/core/pktgen.c
index 33912573959dc89c7d5bf6989e527a0111f1ab89,77a59980b5792323db66f88ebcdb746dccfd2d7f..70236db0fb4fa8339939411585c609d392440767
@@@ -320,7 -320,7 +320,7 @@@ struct pktgen_dev 
                                (see RFC 3260, sec. 4) */
  
        /* MPLS */
 -      unsigned nr_labels;     /* Depth of stack, 0 = no MPLS */
 +      unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */
        __be32 labels[MAX_MPLS_LABELS];
  
        /* VLAN/SVLAN (802.1Q/Q-in-Q) */
                                  */
        char odevname[32];
        struct flow_state *flows;
 -      unsigned cflows;        /* Concurrent flows (config) */
 -      unsigned lflow;         /* Flow length  (config) */
 -      unsigned nflows;        /* accumulated flows (stats) */
 -      unsigned curfl;         /* current sequenced flow (state)*/
 +      unsigned int cflows;    /* Concurrent flows (config) */
 +      unsigned int lflow;             /* Flow length  (config) */
 +      unsigned int nflows;    /* accumulated flows (stats) */
 +      unsigned int curfl;             /* current sequenced flow (state)*/
  
        u16 queue_map_min;
        u16 queue_map_max;
@@@ -592,7 -592,7 +592,7 @@@ static int pktgen_if_show(struct seq_fi
                   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
  
        if (pkt_dev->nr_labels) {
 -              unsigned i;
 +              unsigned int i;
                seq_printf(seq, "     mpls: ");
                for (i = 0; i < pkt_dev->nr_labels; i++)
                        seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@@ -812,7 -812,7 +812,7 @@@ done_str
  
  static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
  {
 -      unsigned n = 0;
 +      unsigned int n = 0;
        char c;
        ssize_t i = 0;
        int len;
@@@ -1510,7 -1510,7 +1510,7 @@@ static ssize_t pktgen_if_write(struct f
        }
  
        if (!strcmp(name, "mpls")) {
 -              unsigned n, cnt;
 +              unsigned int n, cnt;
  
                len = get_labels(&user_buffer[i], pkt_dev);
                if (len < 0)
@@@ -1931,7 -1931,7 +1931,7 @@@ static int pktgen_device_event(struct n
  {
        struct net_device *dev = ptr;
  
-       if (!net_eq(dev_net(dev), &init_net))
+       if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
                return NOTIFY_DONE;
  
        /* It is OK that we do not hold the group lock right now,
@@@ -2324,7 -2324,7 +2324,7 @@@ static void mod_cur_headers(struct pktg
        }
  
        if (pkt_dev->flags & F_MPLS_RND) {
 -              unsigned i;
 +              unsigned int i;
                for (i = 0; i < pkt_dev->nr_labels; i++)
                        if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
                                pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
@@@ -2550,7 -2550,7 +2550,7 @@@ err
  
  static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
  {
 -      unsigned i;
 +      unsigned int i;
        for (i = 0; i < pkt_dev->nr_labels; i++)
                *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
  
@@@ -2934,7 -2934,8 +2934,7 @@@ static struct sk_buff *fill_packet_ipv6
  
        if (datalen < sizeof(struct pktgen_hdr)) {
                datalen = sizeof(struct pktgen_hdr);
 -              if (net_ratelimit())
 -                      pr_info("increased datalen to %d\n", datalen);
 +              net_info_ratelimited("increased datalen to %d\n", datalen);
        }
  
        udph->source = htons(pkt_dev->cur_udp_src);
@@@ -3364,8 -3365,8 +3364,8 @@@ static void pktgen_xmit(struct pktgen_d
                pkt_dev->errors++;
                break;
        default: /* Drivers are not supposed to return other values! */
 -              if (net_ratelimit())
 -                      pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret);
 +              net_info_ratelimited("%s xmit error: %d\n",
 +                                   pkt_dev->odevname, ret);
                pkt_dev->errors++;
                /* fallthru */
        case NETDEV_TX_LOCKED:
@@@ -3754,12 -3755,18 +3754,18 @@@ static void __exit pg_cleanup(void
  {
        struct pktgen_thread *t;
        struct list_head *q, *n;
+       struct list_head list;
  
        /* Stop all interfaces & threads */
        pktgen_exiting = true;
  
-       list_for_each_safe(q, n, &pktgen_threads) {
+       mutex_lock(&pktgen_thread_lock);
+       list_splice(&list, &pktgen_threads);
+       mutex_unlock(&pktgen_thread_lock);
+       list_for_each_safe(q, n, &list) {
                t = list_entry(q, struct pktgen_thread, th_list);
+               list_del(&t->th_list);
                kthread_stop(t->tsk);
                kfree(t);
        }
index 507fe93794aac6ea4ae1734e35c4bc90cbaa94c2,828ce46cb34b9ad6eb90a8363461e6f1e24c9344..a68dbd4f1e4e4404d25d6b71e321122fd4fae66a
@@@ -81,8 -81,7 +81,8 @@@ hash_ip4_data_zero_out(struct hash_ip4_
  static inline bool
  hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
  {
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -95,10 -94,9 +95,10 @@@ hash_ip4_data_tlist(struct sk_buff *skb
        const struct hash_ip4_telem *tdata =
                (const struct hash_ip4_telem *)data;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(tdata->timeout)));
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(tdata->timeout))))
 +              goto nla_put_failure;
  
        return 0;
  
@@@ -264,8 -262,7 +264,8 @@@ ip6_netmask(union nf_inet_addr *ip, u8 
  static bool
  hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
  {
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -278,10 -275,9 +278,10 @@@ hash_ip6_data_tlist(struct sk_buff *skb
        const struct hash_ip6_telem *e =
                (const struct hash_ip6_telem *)data;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(e->timeout)));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(e->timeout))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -368,6 -364,7 +368,7 @@@ hash_ip_create(struct ip_set *set, stru
  {
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
        u8 netmask, hbits;
+       size_t hsize;
        struct ip_set_hash *h;
  
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
        h->timeout = IPSET_NO_TIMEOUT;
  
        hbits = htable_bits(hashsize);
-       h->table = ip_set_alloc(
-                       sizeof(struct htable)
-                       + jhash_size(hbits) * sizeof(struct hbucket));
+       hsize = htable_size(hbits);
+       if (hsize == 0) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table = ip_set_alloc(hsize);
        if (!h->table) {
                kfree(h);
                return -ENOMEM;
index 68f284c97490d51461a06b990cc8c3d08ebbe96e,e8dbb498af8f465fe38866b4ae0cf9038c1bfbaa..92722bb82eea65cde13c77ce78be136fca2a8b94
@@@ -93,10 -93,9 +93,10 @@@ static boo
  hash_ipport4_data_list(struct sk_buff *skb,
                       const struct hash_ipport4_elem *data)
  {
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -110,12 -109,12 +110,12 @@@ hash_ipport4_data_tlist(struct sk_buff 
        const struct hash_ipport4_telem *tdata =
                (const struct hash_ipport4_telem *)data;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(tdata->timeout)));
 -
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(tdata->timeout))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -309,10 -308,9 +309,10 @@@ static boo
  hash_ipport6_data_list(struct sk_buff *skb,
                       const struct hash_ipport6_elem *data)
  {
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -326,12 -324,11 +326,12 @@@ hash_ipport6_data_tlist(struct sk_buff 
        const struct hash_ipport6_telem *e =
                (const struct hash_ipport6_telem *)data;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(e->timeout)));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(e->timeout))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -452,6 -449,7 +452,7 @@@ hash_ipport_create(struct ip_set *set, 
        struct ip_set_hash *h;
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
        u8 hbits;
+       size_t hsize;
  
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
        h->timeout = IPSET_NO_TIMEOUT;
  
        hbits = htable_bits(hashsize);
-       h->table = ip_set_alloc(
-                       sizeof(struct htable)
-                       + jhash_size(hbits) * sizeof(struct hbucket));
+       hsize = htable_size(hbits);
+       if (hsize == 0) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table = ip_set_alloc(hsize);
        if (!h->table) {
                kfree(h);
                return -ENOMEM;
index 1eec4b9e0dca93698ada6cfa610b0c1cfcadaebb,52f79d8ef741cfb432ae2a66cb7913b82e7c94a4..0637ce096def10e4faad6ef18003e263c8e8b77c
@@@ -94,11 -94,10 +94,11 @@@ static boo
  hash_ipportip4_data_list(struct sk_buff *skb,
                       const struct hash_ipportip4_elem *data)
  {
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 +          nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -112,13 -111,13 +112,13 @@@ hash_ipportip4_data_tlist(struct sk_buf
        const struct hash_ipportip4_telem *tdata =
                (const struct hash_ipportip4_telem *)data;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(tdata->timeout)));
 -
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
 +          nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(tdata->timeout))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -320,11 -319,10 +320,11 @@@ static boo
  hash_ipportip6_data_list(struct sk_buff *skb,
                         const struct hash_ipportip6_elem *data)
  {
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
 +          nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -338,13 -336,12 +338,13 @@@ hash_ipportip6_data_tlist(struct sk_buf
        const struct hash_ipportip6_telem *e =
                (const struct hash_ipportip6_telem *)data;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(e->timeout)));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
 +          nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(e->timeout))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -470,6 -467,7 +470,7 @@@ hash_ipportip_create(struct ip_set *set
        struct ip_set_hash *h;
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
        u8 hbits;
+       size_t hsize;
  
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
        h->timeout = IPSET_NO_TIMEOUT;
  
        hbits = htable_bits(hashsize);
-       h->table = ip_set_alloc(
-                       sizeof(struct htable)
-                       + jhash_size(hbits) * sizeof(struct hbucket));
+       hsize = htable_size(hbits);
+       if (hsize == 0) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table = ip_set_alloc(hsize);
        if (!h->table) {
                kfree(h);
                return -ENOMEM;
index 62d66ecef369376d833f386b3b0208ed0aef2147,97583f5af7457e75763fc9aed7f85a38d6cb7306..1ce21ca976e17bb077f1e790677cc7af99f7c7aa
@@@ -124,14 -124,13 +124,14 @@@ hash_ipportnet4_data_list(struct sk_buf
  {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 +          nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -146,16 -145,16 +146,16 @@@ hash_ipportnet4_data_tlist(struct sk_bu
                (const struct hash_ipportnet4_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(tdata->timeout)));
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 -
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
 +          nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(tdata->timeout))) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -437,14 -436,13 +437,14 @@@ hash_ipportnet6_data_list(struct sk_buf
  {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
 +          nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -459,16 -457,15 +459,16 @@@ hash_ipportnet6_data_tlist(struct sk_bu
                (const struct hash_ipportnet6_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(e->timeout)));
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
 +          nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(e->timeout))) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -619,6 -616,7 +619,7 @@@ hash_ipportnet_create(struct ip_set *se
        struct ip_set_hash *h;
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
        u8 hbits;
+       size_t hsize;
  
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
        h->timeout = IPSET_NO_TIMEOUT;
  
        hbits = htable_bits(hashsize);
-       h->table = ip_set_alloc(
-                       sizeof(struct htable)
-                       + jhash_size(hbits) * sizeof(struct hbucket));
+       hsize = htable_size(hbits);
+       if (hsize == 0) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table = ip_set_alloc(hsize);
        if (!h->table) {
                kfree(h);
                return -ENOMEM;
index 6607a814be5791511544d5d743ba39b37c0aee15,1721cdecc9f9eee4d91c75314a5e325327972322..c57a6a09906d7df3338f900d2b3b7321f487b103
@@@ -111,11 -111,10 +111,11 @@@ hash_net4_data_list(struct sk_buff *skb
  {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -129,13 -128,13 +129,13 @@@ hash_net4_data_tlist(struct sk_buff *sk
                (const struct hash_net4_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(tdata->timeout)));
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 -
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(tdata->timeout))) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -340,11 -339,10 +340,11 @@@ hash_net6_data_list(struct sk_buff *skb
  {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -358,13 -356,12 +358,13 @@@ hash_net6_data_tlist(struct sk_buff *sk
                (const struct hash_net6_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(e->timeout)));
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(e->timeout))) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -463,6 -460,7 +463,7 @@@ hash_net_create(struct ip_set *set, str
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
        struct ip_set_hash *h;
        u8 hbits;
+       size_t hsize;
  
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
        h->timeout = IPSET_NO_TIMEOUT;
  
        hbits = htable_bits(hashsize);
-       h->table = ip_set_alloc(
-                       sizeof(struct htable)
-                       + jhash_size(hbits) * sizeof(struct hbucket));
+       hsize = htable_size(hbits);
+       if (hsize == 0) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table = ip_set_alloc(hsize);
        if (!h->table) {
                kfree(h);
                return -ENOMEM;
index 6093f3daa91112ba0d161a0b5661a3c38013107f,33bafc97ca6d0e800426e85bb8b558b9516f2d30..ee863943c8267286e4b16e52400dd40bf51b4f26
@@@ -252,12 -252,11 +252,12 @@@ hash_netiface4_data_list(struct sk_buf
  
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
 -      NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
 +          nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -274,14 -273,13 +274,14 @@@ hash_netiface4_data_tlist(struct sk_buf
  
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
 -      NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(tdata->timeout)));
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
 +          nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(tdata->timeout))))
 +              goto nla_put_failure;
  
        return 0;
  
@@@ -557,12 -555,11 +557,12 @@@ hash_netiface6_data_list(struct sk_buf
  
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
 -      NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
 +          nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -579,14 -576,13 +579,14 @@@ hash_netiface6_data_tlist(struct sk_buf
  
        if (data->nomatch)
                flags |= IPSET_FLAG_NOMATCH;
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
 -      NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(e->timeout)));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
 +          nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(e->timeout))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -726,6 -722,7 +726,7 @@@ hash_netiface_create(struct ip_set *set
        struct ip_set_hash *h;
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
        u8 hbits;
+       size_t hsize;
  
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
        h->ahash_max = AHASH_MAX_SIZE;
  
        hbits = htable_bits(hashsize);
-       h->table = ip_set_alloc(
-                       sizeof(struct htable)
-                       + jhash_size(hbits) * sizeof(struct hbucket));
+       hsize = htable_size(hbits);
+       if (hsize == 0) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table = ip_set_alloc(hsize);
        if (!h->table) {
                kfree(h);
                return -ENOMEM;
index ae3c644adc141e95b3a25945a45ef67983d1718a,3a5e198641d6a9d9bb3c1ce6a36c0f6fac800fcf..fc3143a2d41bbdd07747ede2339ecf8407911cc0
@@@ -124,13 -124,12 +124,13 @@@ hash_netport4_data_list(struct sk_buff 
  {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -145,15 -144,15 +145,15 @@@ hash_netport4_data_tlist(struct sk_buf
                (const struct hash_netport4_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(tdata->timeout)));
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 -
 +      if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(tdata->timeout))) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -403,13 -402,12 +403,13 @@@ hash_netport6_data_list(struct sk_buff 
  {
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -424,15 -422,14 +424,15 @@@ hash_netport6_data_tlist(struct sk_buf
                (const struct hash_netport6_telem *)data;
        u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
  
 -      NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
 -      NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
 -      NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
 -      NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
 -      NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
 -                    htonl(ip_set_timeout_get(e->timeout)));
 -      if (flags)
 -              NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
 +      if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
 +          nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
 +          nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
 +          nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
 +          nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
 +                        htonl(ip_set_timeout_get(e->timeout))) ||
 +          (flags &&
 +           nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
 +              goto nla_put_failure;
        return 0;
  
  nla_put_failure:
@@@ -575,6 -572,7 +575,7 @@@ hash_netport_create(struct ip_set *set
        struct ip_set_hash *h;
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
        u8 hbits;
+       size_t hsize;
  
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
        h->timeout = IPSET_NO_TIMEOUT;
  
        hbits = htable_bits(hashsize);
-       h->table = ip_set_alloc(
-                       sizeof(struct htable)
-                       + jhash_size(hbits) * sizeof(struct hbucket));
+       hsize = htable_size(hbits);
+       if (hsize == 0) {
+               kfree(h);
+               return -ENOMEM;
+       }
+       h->table = ip_set_alloc(hsize);
        if (!h->table) {
                kfree(h);
                return -ENOMEM;
index f86de29979ef317351252e7284389624b5541d9e,e66341ec455c3d7c588680bd829e044b7d7752ce..2c74daa5aca5d762deb98488b17e7fe427828f41
@@@ -321,7 -321,7 +321,7 @@@ static int queue_userspace_packet(int d
                        return -ENOMEM;
  
                nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
-               if (!skb)
+               if (!nskb)
                        return -ENOMEM;
  
                nskb->vlan_tci = 0;
@@@ -421,6 -421,19 +421,19 @@@ static int validate_sample(const struc
        return validate_actions(actions, key, depth + 1);
  }
  
+ static int validate_tp_port(const struct sw_flow_key *flow_key)
+ {
+       if (flow_key->eth.type == htons(ETH_P_IP)) {
+               if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+                       return 0;
+       } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+               if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+                       return 0;
+       }
+       return -EINVAL;
+ }
  static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key)
  {
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;
  
-               if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-                       return -EINVAL;
-               break;
+               return validate_tp_port(flow_key);
  
        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;
  
-               if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-                       return -EINVAL;
-               break;
+               return validate_tp_port(flow_key);
  
        default:
                return -EINVAL;
@@@ -778,18 -786,15 +786,18 @@@ static int ovs_flow_cmd_fill_info(struc
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);
  
 -      if (used)
 -              NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
 +      if (used &&
 +          nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
 +              goto nla_put_failure;
  
 -      if (stats.n_packets)
 -              NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
 -                      sizeof(struct ovs_flow_stats), &stats);
 +      if (stats.n_packets &&
 +          nla_put(skb, OVS_FLOW_ATTR_STATS,
 +                  sizeof(struct ovs_flow_stats), &stats))
 +              goto nla_put_failure;
  
 -      if (tcp_flags)
 -              NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
 +      if (tcp_flags &&
 +          nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
 +              goto nla_put_failure;
  
        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
@@@ -1171,8 -1176,7 +1179,8 @@@ static int ovs_dp_cmd_fill_info(struct 
                goto nla_put_failure;
  
        get_dp_stats(dp, &dp_stats);
 -      NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
 +      if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
 +              goto nla_put_failure;
  
        return genlmsg_end(skb, ovs_header);
  
@@@ -1472,16 -1476,14 +1480,16 @@@ static int ovs_vport_cmd_fill_info(stru
  
        ovs_header->dp_ifindex = get_dpifindex(vport->dp);
  
 -      NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
 -      NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
 -      NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
 -      NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
 +      if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
 +          nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
 +          nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
 +          nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
 +              goto nla_put_failure;
  
        ovs_vport_get_stats(vport, &vport_stats);
 -      NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
 -              &vport_stats);
 +      if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
 +                  &vport_stats))
 +              goto nla_put_failure;
  
        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
@@@ -1647,10 -1649,9 +1655,9 @@@ static int ovs_vport_cmd_set(struct sk_
        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
-               err = PTR_ERR(reply);
                netlink_set_err(init_net.genl_sock, 0,
-                               ovs_dp_vport_multicast_group.id, err);
-               return 0;
+                               ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+               goto exit_unlock;
        }
  
        genl_notify(reply, genl_info_net(info), info->snd_pid,
diff --combined net/openvswitch/flow.c
index 7cb416381e87e3e108c6776f6d7669bd266693c0,2a11ec2383eede7b653a36d09b46f1048e6f0434..6d4d8097cf96bba74d6360d505e78260ad639d3b
@@@ -183,7 -183,8 +183,8 @@@ void ovs_flow_used(struct sw_flow *flow
        u8 tcp_flags = 0;
  
        if (flow->key.eth.type == htons(ETH_P_IP) &&
-           flow->key.ip.proto == IPPROTO_TCP) {
+           flow->key.ip.proto == IPPROTO_TCP &&
+           likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
                u8 *tcp = (u8 *)tcp_hdr(skb);
                tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
        }
@@@ -1174,13 -1175,11 +1175,13 @@@ int ovs_flow_to_nlattrs(const struct sw
        struct ovs_key_ethernet *eth_key;
        struct nlattr *nla, *encap;
  
 -      if (swkey->phy.priority)
 -              NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
 +      if (swkey->phy.priority &&
 +          nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
 +              goto nla_put_failure;
  
 -      if (swkey->phy.in_port != USHRT_MAX)
 -              NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
 +      if (swkey->phy.in_port != USHRT_MAX &&
 +          nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
 +              goto nla_put_failure;
  
        nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
        if (!nla)
        memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
  
        if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
 -              NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
 -              NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
 +              if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
 +                  nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
 +                      goto nla_put_failure;
                encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
                if (!swkey->eth.tci)
                        goto unencap;
        if (swkey->eth.type == htons(ETH_P_802_2))
                goto unencap;
  
 -      NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
 +      if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
 +              goto nla_put_failure;
  
        if (swkey->eth.type == htons(ETH_P_IP)) {
                struct ovs_key_ipv4 *ipv4_key;
diff --combined net/sctp/output.c
index 69534c5f8afa47e46e09a6fbc65d16a6ed9280d7,8fc4dcd294abdafbb669a18c190679ceccdd3015..f1b7d4bb591e9b648865c4e096ef838e77678442
@@@ -377,9 -377,7 +377,7 @@@ int sctp_packet_transmit(struct sctp_pa
         */
        skb_set_owner_w(nskb, sk);
  
-       /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
-       if (!dst || (dst->obsolete > 1)) {
-               dst_release(dst);
+       if (!sctp_transport_dst_check(tp)) {
                sctp_transport_route(tp, NULL, sctp_sk(sk));
                if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
                        sctp_assoc_sync_pmtu(asoc);
@@@ -663,8 -661,8 +661,8 @@@ static sctp_xmit_t sctp_packet_can_appe
         */
        if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
            inflight && sctp_state(asoc, ESTABLISHED)) {
 -              unsigned max = transport->pathmtu - packet->overhead;
 -              unsigned len = chunk->skb->len + q->out_qlen;
 +              unsigned int max = transport->pathmtu - packet->overhead;
 +              unsigned int len = chunk->skb->len + q->out_qlen;
  
                /* Check whether this chunk and all the rest of pending
                 * data will fit or delay in hopes of bundling a full
This page took 0.135491 seconds and 5 git commands to generate.